Merge remote-tracking branch 'origin/DLAB-1565-aws' into DLAB-1565-aws
diff --git a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
index 2c283f5..3864229 100644
--- a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
+++ b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
@@ -1322,7 +1322,7 @@
         ec2_client = boto3.client('ec2', region_name=args.region)
         efs_client = boto3.client('efs', region_name=args.region)
         route53_client = boto3.client('route53')
-    tag_name = args.service_base_name + '-Tag'
+    tag_name = args.service_base_name + '-tag'
     pre_defined_vpc = True
     pre_defined_subnet = True
     pre_defined_sg = True
diff --git a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
index c37da53..741ca18 100644
--- a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
+++ b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
@@ -41,7 +41,7 @@
 parser.add_argument('--spark_version', type=str, default='')
 parser.add_argument('--hadoop_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -162,7 +162,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/edge/fabfile.py b/infrastructure-provisioning/src/edge/fabfile.py
index edbed71..66a656b 100644
--- a/infrastructure-provisioning/src/edge/fabfile.py
+++ b/infrastructure-provisioning/src/edge/fabfile.py
@@ -45,44 +45,6 @@
         sys.exit(1)
 
 
-#def run():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_configure'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed configuring Edge node.", str(err))
-#        sys.exit(1)
-
-
-# Main function for terminating EDGE node and exploratory environment if exists
-#def terminate():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#    try:
-#        local("~/scripts/{}.py".format('edge_terminate'))
-#    except Exception as err:
-#       traceback.print_exc()
-#        append_result("Failed terminating Edge node.", str(err))
-#        sys.exit(1)
-
-
 # Main function for stopping EDGE node
 def stop():
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
@@ -111,41 +73,3 @@
         traceback.print_exc()
         append_result("Failed starting Edge node.", str(err))
         sys.exit(1)
-
-
-#def recreate():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-#
-#    try:
-#        local("~/scripts/{}.py".format('edge_configure'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed configuring Edge node.", str(err))
-#        sys.exit(1)
-
-#def reupload_key():
-#    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
-#                                               os.environ['request_id'])
-#    local_log_filepath = "/logs/edge/" + local_log_filename
-#    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
-#                        level=logging.DEBUG,
-#                        filename=local_log_filepath)
-#
-#    try:
-#        local("~/scripts/{}.py".format('reupload_ssh_key'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed to reupload key on Edge node.", str(err))
-#        sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
index adb2661..9519053 100644
--- a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
@@ -306,10 +306,10 @@
     client = boto3.client('ec2')
     try:
         route_tables = client.describe_route_tables(
-            Filters=[{'Name': 'tag:{}-Tag'.format(service_base_name), 'Values': ['{}'.format(
+            Filters=[{'Name': 'tag:{}-tag'.format(service_base_name), 'Values': ['{}'.format(
                 service_base_name)]}]).get('RouteTables')
         route_tables2 = client.describe_route_tables(Filters=[
-            {'Name': 'tag:{}-secondary-Tag'.format(service_base_name), 'Values': ['{}'.format(
+            {'Name': 'tag:{}-secondary-tag'.format(service_base_name), 'Values': ['{}'.format(
                 service_base_name)]}]).get('RouteTables')
         for table in route_tables:
             routes = table.get('Routes')
@@ -344,7 +344,7 @@
     try:
         ec2 = boto3.resource('ec2')
         client = boto3.client('ec2')
-        tag = {"Key": service_base_name + '-Tag', "Value": service_base_name}
+        tag = {"Key": service_base_name + '-tag', "Value": service_base_name}
         tag_name = {"Key": 'Name', "Value": "{0}-peering-connection".format(service_base_name)}
         peering = ec2.create_vpc_peering_connection(PeerVpcId=vpc_id, VpcId=vpc2_id)
         client.accept_vpc_peering_connection(VpcPeeringConnectionId=peering.id)
@@ -510,7 +510,7 @@
         cluster = client.list_instances(ClusterId=cluster_id)
         instances = cluster['Instances']
         for instance in instances:
-            instance_tag = {'Key': os.environ['conf_service_base_name'] + '-Tag',
+            instance_tag = {'Key': os.environ['conf_service_base_name'] + '-tag',
                             'Value': node_name}
             tag_intance_volume(instance['Ec2InstanceId'], node_name, instance_tag)
     except Exception as err:
@@ -888,37 +888,42 @@
         traceback.print_exc(file=sys.stdout)
 
 
-def remove_all_iam_resources(instance_type, scientist=''):
+def remove_all_iam_resources(instance_type, project_name='', endpoint_name=''):
     try:
         client = boto3.client('iam')
-        service_base_name = os.environ['conf_service_base_name'].lower().replace('-', '_')
+        service_base_name = os.environ['conf_service_base_name']
         roles_list = []
+        if project_name:
+            start_prefix = '{}-{}-{}-'.format(service_base_name, project_name, endpoint_name)
+        else:
+            start_prefix = '{}-'.format(service_base_name)
         for item in client.list_roles(MaxItems=250).get("Roles"):
-            if item.get("RoleName").startswith(service_base_name + '-'):
+            if item.get("RoleName").startswith(start_prefix):
                 roles_list.append(item.get('RoleName'))
         if roles_list:
             roles_list.sort(reverse=True)
             for iam_role in roles_list:
-                if '-ssn-Role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
+                if '-ssn-role' in iam_role and instance_type in ('ssn', 'all'):
                     try:
-                        client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-Policy'.format(
+                        client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-policy'.format(
                             service_base_name))
                     except:
-                        print('There is no policy {}-ssn-Policy to delete'.format(service_base_name))
+                        print('There is no policy {}-ssn-policy to delete'.format(service_base_name))
                     role_profiles = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
                     if role_profiles:
                         for i in role_profiles:
                             role_profile_name = i.get('InstanceProfileName')
-                            if role_profile_name == '{0}-ssn-Profile'.format(service_base_name):
+                            if role_profile_name == '{0}-ssn-profile'.format(service_base_name):
                                 remove_roles_and_profiles(iam_role, role_profile_name)
                     else:
                         print("There is no instance profile for {}".format(iam_role))
                         client.delete_role(RoleName=iam_role)
                         print("The IAM role {} has been deleted successfully".format(iam_role))
-                if '-edge-Role' in iam_role:
-                    if instance_type == 'edge' and scientist in iam_role:
+                if '-edge-role' in iam_role:
+                    if instance_type == 'edge' and project_name in iam_role:
                         remove_detach_iam_policies(iam_role, 'delete')
-                        role_profile_name = '{0}-{1}-edge-Profile'.format(service_base_name, scientist)
+                        role_profile_name = '{0}-{1}-{2}-edge-profile'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower())
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -938,10 +943,11 @@
                             print("There is no instance profile for {}".format(iam_role))
                             client.delete_role(RoleName=iam_role)
                             print("The IAM role {} has been deleted successfully".format(iam_role))
-                if '-nb-de-Role' in iam_role:
-                    if instance_type == 'notebook' and scientist in iam_role:
+                if '-nb-de-role' in iam_role:
+                    if instance_type == 'notebook' and project_name in iam_role:
                         remove_detach_iam_policies(iam_role)
-                        role_profile_name = '{0}-{1}-{2}-nb-de-Profile'.format(service_base_name, scientist, os.environ['endpoint_name'])
+                        role_profile_name = '{0}-{1}-{2}-nb-de-profile'.format(service_base_name, project_name,
+                                                                               os.environ['endpoint_name'].lower())
                         try:
                             client.get_instance_profile(InstanceProfileName=role_profile_name)
                             remove_roles_and_profiles(iam_role, role_profile_name)
@@ -965,22 +971,22 @@
             print("There are no IAM roles to delete. Checking instance profiles...")
         profile_list = []
         for item in client.list_instance_profiles(MaxItems=250).get("InstanceProfiles"):
-            if item.get("InstanceProfileName").startswith('{}-'.format(service_base_name)):
+            if item.get("InstanceProfileName").startswith(start_prefix):
                 profile_list.append(item.get('InstanceProfileName'))
         if profile_list:
             for instance_profile in profile_list:
-                if '-ssn-Profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
+                if '-ssn-profile' in instance_profile and instance_type in ('ssn', 'all'):
                     client.delete_instance_profile(InstanceProfileName=instance_profile)
                     print("The instance profile {} has been deleted successfully".format(instance_profile))
-                if '-edge-Profile' in instance_profile:
-                    if instance_type == 'edge' and scientist in instance_profile:
+                if '-edge-profile' in instance_profile:
+                    if instance_type == 'edge' and project_name in instance_profile:
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
                     if instance_type == 'all':
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
-                if '-nb-de-Profile' in instance_profile:
-                    if instance_type == 'notebook' and scientist in instance_profile:
+                if '-nb-de-profile' in instance_profile:
+                    if instance_type == 'notebook' and project_name in instance_profile:
                         client.delete_instance_profile(InstanceProfileName=instance_profile)
                         print("The instance profile {} has been deleted successfully".format(instance_profile))
                     if instance_type == 'all':
@@ -1034,7 +1040,7 @@
             if bucket_name in item.get('Name'):
                 for i in client.get_bucket_tagging(Bucket=item.get('Name')).get('TagSet'):
                     i.get('Key')
-                    if i.get('Key') == os.environ['conf_service_base_name'] + '-Tag':
+                    if i.get('Key') == os.environ['conf_service_base_name'].lower() + '-tag':
                         bucket_list.append(item.get('Name'))
         for s3bucket in bucket_list:
             if s3bucket:
@@ -1057,8 +1063,8 @@
     try:
         ec2 = boto3.resource('ec2')
         client = boto3.client('ec2')
-        tag_name = os.environ['conf_service_base_name'] + '-Tag'
-        tag2_name = os.environ['conf_service_base_name'] + '-secondary-Tag'
+        tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
+        tag2_name = os.environ['conf_service_base_name'].lower() + '-secondary-tag'
         subnets = ec2.subnets.filter(
             Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}])
         subnets2 = ec2.subnets.filter(
@@ -1084,7 +1090,7 @@
 def remove_peering(tag_value):
     try:
         client = boto3.client('ec2')
-        tag_name = os.environ['conf_service_base_name'] + '-Tag'
+        tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
         if os.environ['conf_duo_vpc_enable'] == 'true':
             peering_id = client.describe_vpc_peering_connections(Filters=[
                 {'Name': 'tag-key', 'Values': [tag_name]},
diff --git a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
index 9c4c636..8cac3c4 100644
--- a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
@@ -521,7 +521,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-ssn-disk0'.format(service_base_name),
+                            'name': '{}-ssn-volume-primary'.format(service_base_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -531,7 +531,7 @@
                         }
                     },
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -568,7 +568,8 @@
                             },
                             'os_disk': {
                                 'os_type': 'Linux',
-                                'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+                                'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower()),
                                 'create_option': create_option,
                                 'disk_size_gb': int(primary_disk_size),
                                 'tags': tags,
@@ -578,7 +579,7 @@
                             }
                         },
                         'os_profile': {
-                            'computer_name': instance_name,
+                            'computer_name': instance_name.replace('_', '-'),
                             'admin_username': dlab_ssh_user_name,
                             'linux_configuration': {
                                 'disable_password_authentication': True,
@@ -608,7 +609,8 @@
                         'storage_profile': {
                             'os_disk': {
                                 'os_type': 'Linux',
-                                'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+                                'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+                                                                              os.environ['endpoint_name'].lower()),
                                 'create_option': create_option,
                                 'disk_size_gb': int(primary_disk_size),
                                 'tags': tags,
@@ -637,7 +639,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -648,11 +650,11 @@
                         'data_disks': [
                             {
                                 'lun': 1,
-                                'name': '{}-disk1'.format(instance_name),
+                                'name': '{}-volume-secondary'.format(instance_name),
                                 'create_option': 'empty',
                                 'disk_size_gb': 32,
                                 'tags': {
-                                    'Name': '{}-disk1'.format(instance_name)
+                                    'Name': '{}-volume-secondary'.format(instance_name)
                                 },
                                 'managed_disk': {
                                     'storage_account_type': instance_storage_account_type
@@ -667,7 +669,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -684,7 +686,7 @@
                     },
                     'storage_profile': storage_profile,
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -712,7 +714,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -731,7 +733,7 @@
                         },
                         'os_disk': {
                             'os_type': 'Linux',
-                            'name': '{}-disk0'.format(instance_name),
+                            'name': '{}-volume-primary'.format(instance_name),
                             'create_option': 'fromImage',
                             'disk_size_gb': int(primary_disk_size),
                             'tags': tags,
@@ -748,7 +750,7 @@
                     },
                     'storage_profile': storage_profile,
                     'os_profile': {
-                        'computer_name': instance_name,
+                        'computer_name': instance_name.replace('_', '-'),
                         'admin_username': dlab_ssh_user_name,
                         'linux_configuration': {
                             'disable_password_authentication': True,
@@ -1082,19 +1084,20 @@
                 spark_jars_paths = sudo('cat /opt/spark/conf/spark-defaults.conf | grep -e "^spark.jars " ')
             except:
                 spark_jars_paths = None
-        user_storage_account_tag = os.environ['conf_service_base_name'] + '-' + (os.environ['project_name'].lower().replace('_', '-')).\
-            replace('_', '-') + '-' + os.environ['endpoint_name'].lower().replace('_', '-') + '-storage'
-        shared_storage_account_tag = '{0}-{1}-shared-storage'.format(os.environ['conf_service_base_name'],
-                                                                     os.environ['endpoint_name'])
+        user_storage_account_tag = "{}-{}-{}-bucket".format(os.environ['conf_service_base_name'],
+                                                            os.environ['project_name'].lower(),
+                                                            os.environ['endpoint_name'].lower())
+        shared_storage_account_tag = '{0}-{1}-shared-bucket'.format(os.environ['conf_service_base_name'],
+                                                                    os.environ['endpoint_name'].lower())
         for storage_account in meta_lib.AzureMeta().list_storage_accounts(os.environ['azure_resource_group_name']):
             if user_storage_account_tag == storage_account.tags["Name"]:
                 user_storage_account_name = storage_account.name
-                user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
-                                                                                  user_storage_account_name)[0]
+                user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+                    os.environ['azure_resource_group_name'], user_storage_account_name)[0]
             if shared_storage_account_tag == storage_account.tags["Name"]:
                 shared_storage_account_name = storage_account.name
-                shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
-                                                                                    shared_storage_account_name)[0]
+                shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+                    os.environ['azure_resource_group_name'], shared_storage_account_name)[0]
         if os.environ['azure_datalake_enable'] == 'false':
             put(templates_dir + 'core-site-storage.xml', '/tmp/core-site.xml')
         else:
diff --git a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
index ba55bc4..5b318c8 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
@@ -39,6 +39,7 @@
 import dlab.common_lib
 import backoff
 import ast
+import random
 
 
 class GCPActions:
@@ -292,6 +293,11 @@
         unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
         service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
         access_configs = ''
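+        # Enable IP forwarding only on edge instances (they are expected to route traffic for project resources)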
+        if instance_class == 'edge':
+            ip_forward = True
+        else:
+            ip_forward = False
         if instance_class == 'ssn' or instance_class == 'edge':
             access_configs = [{
                 "type": "ONE_TO_ONE_NAT",
@@ -373,6 +379,7 @@
             "name": instance_name,
             "machineType": "zones/{}/machineTypes/{}".format(zone, instance_size),
             "labels": labels,
+            "canIpForward": ip_forward,
             "networkInterfaces": [
                 {
                     "network": "global/networks/{}".format(vpc_name),
@@ -554,7 +561,10 @@
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
 
-    def set_role_to_service_account(self, service_account_name, role_name, service_base_name, role_type='custom'):
+    def set_role_to_service_account(self, service_account_name, role_name, service_base_name, role_type='custom',
+                                    num=0):
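+        # 'num' tracks the attempt count; the retry on concurrent IAM policy changes below gives up after 10 tries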
+        num += 1
         request = GCPActions().service_resource.projects().getIamPolicy(resource=self.project, body={})
         project_policy = request.execute()
         unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
@@ -577,6 +587,10 @@
         try:
             return request.execute()
         except Exception as err:
+            if "There were concurrent policy changes. " \
+               "Please retry the whole read-modify-write with exponential backoff." in str(err) and num <= 10:
+                time.sleep(random.randint(5, 20))
+                return self.set_role_to_service_account(service_account_name, role_name, service_base_name, role_type, num)
             logging.info(
                 "Unable to set Service account policy: " + str(err) + "\n Traceback: " + traceback.print_exc(
                     file=sys.stdout))
diff --git a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
index 1cc24b8..cc16028 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
@@ -172,7 +172,8 @@
             traceback.print_exc(file=sys.stdout)
 
     def get_instance(self, instance_name):
-        request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
+        meta = GCPMeta()
+        request = meta.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
                                                instance=instance_name)
         try:
             return request.execute()
@@ -183,8 +184,8 @@
                 raise err
         except Exception as err:
             logging.info(
-                "Unable to get Firewall: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
-            append_result(str({"error": "Unable to get Firewall",
+                "Unable to get instance: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+            append_result(str({"error": "Unable to get instance",
                                "error_message": str(err) + "\n Traceback: " + traceback.print_exc(
                                    file=sys.stdout)}))
             traceback.print_exc(file=sys.stdout)
@@ -210,7 +211,7 @@
 
     def get_instance_public_ip_by_name(self, instance_name):
         try:
-            result = GCPMeta().get_instance(instance_name)
+            result = self.get_instance(instance_name)
             if result:
                 for i in result.get('networkInterfaces'):
                     for j in i.get('accessConfigs'):
@@ -270,7 +271,7 @@
             traceback.print_exc(file=sys.stdout)
 
     def get_service_account(self, service_account_name, service_base_name):
-        unique_index = GCPMeta().get_index_by_service_account_name(service_account_name)
+        unique_index = self.get_index_by_service_account_name(service_account_name)
         if unique_index == '':
             service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_base_name, self.project)
         else:
@@ -343,7 +344,7 @@
 
     def get_private_ip_address(self, instance_name):
         try:
-            result = GCPMeta().get_instance(instance_name)
+            result = self.get_instance(instance_name)
             for i in result['networkInterfaces']:
                 return i['networkIP']
         except Exception as err:
@@ -700,7 +701,7 @@
 
     def dataproc_waiter(self, labels):
         if os.path.exists(
-                '/response/.emr_creating_' + os.environ['exploratory_name']) or GCPMeta().get_not_configured_dataproc(
+                '/response/.emr_creating_' + os.environ['exploratory_name']) or self.get_not_configured_dataproc(
                 os.environ['notebook_instance_name']):
             with hide('stderr', 'running', 'warnings'):
                 local("echo 'Some Dataproc cluster is still being created/terminated, waiting..'")
@@ -738,10 +739,10 @@
         try:
             private_list_ip = []
             if conf_type == 'edge_node' or conf_type == 'exploratory':
-                private_list_ip.append(GCPMeta().get_private_ip_address(
+                private_list_ip.append(self.get_private_ip_address(
                 instance_id))
             elif conf_type == 'computational_resource':
-                instance_list = GCPMeta().get_list_instances_by_label(
+                instance_list = self.get_list_instances_by_label(
                     os.environ['gcp_zone'], instance_id)
                 for instance in instance_list.get('items'):
                     private_list_ip.append(instance.get('networkInterfaces')[0].get('networkIP'))
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
index d73d7ee..582d58e 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
@@ -147,15 +147,25 @@
                 sudo('./configure')
                 sudo('make build')
                 sudo('make install')
-                sudo('luarocks install lua-resty-jwt')
-                sudo('luarocks install lua-resty-session')
-                sudo('luarocks install lua-resty-http')
-                sudo('luarocks install lua-resty-openidc')
-                sudo('luarocks install luacrypto')
-                sudo('luarocks install lua-cjson')
-                sudo('luarocks install lua-resty-core')
-                sudo('luarocks install random')
-                sudo('luarocks install lua-resty-string')
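+                # Pin exact rock versions from explicit luarocks.org URLs; a bare 'luarocks install <name>' pulls the latest release, which can break the edge node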
+                sudo('wget https://luarocks.org/manifests/cdbattags/lua-resty-jwt-0.2.0-0.src.rock')
+                sudo('luarocks build lua-resty-jwt-0.2.0-0.src.rock')
+                sudo('wget https://luarocks.org/manifests/bungle/lua-resty-session-2.26-1.src.rock')
+                sudo('luarocks build lua-resty-session-2.26-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/pintsized/lua-resty-http-0.15-0.src.rock')
+                sudo('luarocks build lua-resty-http-0.15-0.src.rock')
+                sudo('wget https://luarocks.org/manifests/hanszandbelt/lua-resty-openidc-1.7.2-1.src.rock')
+                sudo('luarocks build lua-resty-openidc-1.7.2-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/starius/luacrypto-0.3.2-2.src.rock')
+                sudo('luarocks build luacrypto-0.3.2-2.src.rock')
+                sudo('wget https://luarocks.org/manifests/openresty/lua-cjson-2.1.0.6-1.src.rock')
+                sudo('luarocks build lua-cjson-2.1.0.6-1.src.rock')
+                sudo('wget https://luarocks.org/manifests/avlubimov/lua-resty-core-0.1.17-4.src.rock')
+                sudo('luarocks build lua-resty-core-0.1.17-4.src.rock')
+                sudo('wget https://luarocks.org/manifests/hjpotter92/random-1.1-0.rockspec')
+                sudo('luarocks install random-1.1-0.rockspec')
+                sudo('wget https://luarocks.org/manifests/rsander/lua-resty-string-0.09-0.rockspec')
+                sudo('luarocks install lua-resty-string-0.09-0.rockspec')
 
             sudo('useradd -r nginx')
             sudo('rm -f /etc/nginx/nginx.conf')
diff --git a/infrastructure-provisioning/src/general/lib/os/fab.py b/infrastructure-provisioning/src/general/lib/os/fab.py
index 055df43..e5fc30f 100644
--- a/infrastructure-provisioning/src/general/lib/os/fab.py
+++ b/infrastructure-provisioning/src/general/lib/os/fab.py
@@ -119,6 +119,13 @@
 def append_result(error, exception=''):
     ts = time.time()
     st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
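+    # Compose the message once so the same text is printed immediately and appended to result.json below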
+    if exception:
+        error_message = "[Error-{}]: {}. Exception: {}".format(st, error, str(exception))
+        print(error_message)
+    else:
+        error_message = "[Error-{}]: {}.".format(st, error)
+        print(error_message)
     with open('/root/result.json', 'a+') as f:
         text = f.read()
     if len(text) == 0:
@@ -127,10 +134,7 @@
             f.write(res)
     with open("/root/result.json") as f:
         data = json.load(f)
-    if exception:
-        data['error'] = data['error'] + " [Error-" + st + "]:" + error + " Exception: " + str(exception)
-    else:
-        data['error'] = data['error'] + " [Error-" + st + "]:" + error
+    data['error'] = data['error'] + error_message
     with open("/root/result.json", 'w') as f:
         json.dump(data, f)
     print(data)
@@ -551,7 +555,7 @@
     run('git config --global https.proxy $https_proxy')
 
 
-def install_inactivity_checker(os_user, ip_adress, rstudio=False):
+def install_inactivity_checker(os_user, ip_address, rstudio=False):
     if not exists('/home/{}/.ensure_dir/inactivity_ensured'.format(os_user)):
         try:
             if not exists('/opt/inactivity'):
@@ -562,7 +565,7 @@
                 put('/root/templates/inactive_rs.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
             else:
                 put('/root/templates/inactive.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
-            sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_adress))
+            sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_address))
             sudo("chmod 755 /opt/inactivity/inactive.sh")
             sudo("chown root:root /etc/systemd/system/inactive.service")
             sudo("chown root:root /etc/systemd/system/inactive.timer")
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
index 1e9ef5c..1d5cb04 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
@@ -21,30 +21,33 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
 import uuid
+import os
 
 
 if __name__ == "__main__":
     try:
         image_conf = dict()
-        create_aws_config_files()
-        image_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        dlab.actions_lib.create_aws_config_files()
+        image_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         image_conf['project_name'] = os.environ['project_name']
         image_conf['project_tag'] = os.environ['project_name']
+        image_conf['endpoint_name'] = os.environ['endpoint_name']
         image_conf['instance_name'] = os.environ['notebook_instance_name']
-        image_conf['instance_tag'] = '{}-Tag'.format(image_conf['service_base_name'])
+        image_conf['instance_tag'] = '{}-tag'.format(image_conf['service_base_name'])
         image_conf['application'] = os.environ['application']
-        image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
-        image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
-                                                             image_conf['project_name'],
-                                                             image_conf['application'],
-                                                             image_conf['image_name']).lower()
+        image_conf['image_name'] = os.environ['notebook_image_name']
+        image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+                                                                image_conf['project_name'],
+                                                                image_conf['endpoint_name'],
+                                                                image_conf['application'],
+                                                                image_conf['image_name'])
         image_conf['tags'] = {"Name": image_conf['full_image_name'],
                               "SBN": image_conf['service_base_name'],
                               "Project": image_conf['project_name'],
@@ -52,16 +55,19 @@
                               "FIN": image_conf['full_image_name'],
                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
 
-        ami_id = get_ami_id_by_name(image_conf['full_image_name'])
+        ami_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'])
         if ami_id == '':
             try:
-                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+                                                     ';project_tag:{0};endpoint_tag:{1};'.format(
+                                                         os.environ['project_name'], os.environ['endpoint_name'])
             except KeyError:
-                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-            image_id = create_image_from_instance(tag_name=image_conf['instance_tag'],
-                                                  instance_name=image_conf['instance_name'],
-                                                  image_name=image_conf['full_image_name'],
-                                                  tags=json.dumps(image_conf['tags']))
+                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                    os.environ['project_name'], os.environ['endpoint_name'])
+            image_id = dlab.actions_lib.create_image_from_instance(tag_name=image_conf['instance_tag'],
+                                                                   instance_name=image_conf['instance_name'],
+                                                                   image_name=image_conf['full_image_name'],
+                                                                   tags=json.dumps(image_conf['tags']))
             print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
 
             with open("/root/result.json", 'w') as result:
@@ -73,5 +79,5 @@
                        "Action": "Create image from notebook"}
                 result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to create image from notebook", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create image from notebook", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
index d7f5ee0..8b7f038 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
@@ -35,6 +35,7 @@
 parser.add_argument('--edge_role_name', type=str, default='')
 parser.add_argument('--notebook_role_name', type=str, default='')
 parser.add_argument('--region', type=str, default='')
+parser.add_argument('--endpoint_name', type=str, default='')
 parser.add_argument('--user_predefined_s3_policies', type=str, default='')
 args = parser.parse_args()
 
@@ -64,18 +65,19 @@
                     for i in list:
                         if i.get('PolicyName') in list_predefined_policies:
                             list_policies_arn.append(i.get('Arn'))
-                response = iam.create_policy(PolicyName='{}-{}-strict_to_S3-Policy'.
-                                             format(args.service_base_name, args.username), PolicyDocument=policy)
+                response = iam.create_policy(PolicyName='{}-{}-{}-strict_to_S3-Policy'.
+                                             format(args.service_base_name, args.username, args.endpoint_name),
+                                             PolicyDocument=policy)
                 time.sleep(10)
                 list_policies_arn.append(response.get('Policy').get('Arn'))
             except botocore.exceptions.ClientError as cle:
                 if cle.response['Error']['Code'] == 'EntityAlreadyExists':
-                    print("Policy {}-{}-strict_to_S3-Policy already exists. Reusing it.".
-                          format(args.service_base_name, args.username))
+                    print("Policy {}-{}-{}-strict_to_S3-Policy already exists. Reusing it.".
+                          format(args.service_base_name, args.username, args.endpoint_name))
                     list = iam.list_policies().get('Policies')
                     for i in list:
-                        if '{}-{}-strict_to_S3-Policy'.format(
-                                args.service_base_name, args.username) == i.get('PolicyName') or (
+                        if '{}-{}-{}-strict_to_S3-Policy'.format(
+                                args.service_base_name, args.username, args.endpoint_name) == i.get('PolicyName') or (
                                 args.user_predefined_s3_policies != 'None' and i.get('PolicyName') in
                                 list_predefined_policies):
                             list_policies_arn.append(i.get('Arn'))
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
index 5469e2a..b4dc3c6 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
@@ -132,7 +132,7 @@
             print("Associating route_table with the subnet")
             ec2 = boto3.resource('ec2')
             if os.environ['conf_duo_vpc_enable'] == 'true':
-                rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-Tag', args.infra_tag_value)
+                rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-tag', args.infra_tag_value)
             else:
                 rt = get_route_table_by_tag(args.infra_tag_name, args.infra_tag_value)
             route_table = ec2.RouteTable(rt)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
index bd7d266..8051e6d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
@@ -43,7 +43,7 @@
     env.host_string = env.user + "@" + env.hosts
 
     service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        os.environ['conf_service_base_name'][:20], '-', True)
     project_name = os.environ['project_name']
     endpoint_name = os.environ['endpoint_name']
     bucket_name = ('{0}-{1}-{2}-bucket'.format(service_base_name,
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
index 6fe139b..1d0df4f 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,21 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    emr_id = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+    dlab.actions_lib.terminate_emr(emr_id)
+    dlab.actions_lib.remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'],
+                                    os.environ['notebook_instance_name'], os.environ['conf_os_user'],
+                                    notebook_config['key_path'], os.environ['emr_version'])
 
 
 if __name__ == "__main__":
@@ -38,38 +48,45 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    # generating variables dictionary
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
-    notebook_config['project_name'] = os.environ['project_name']
-    notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
-                                                                  notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['cluster_name'] = get_not_configured_emr(notebook_config['tag_name'],
-                                                             notebook_config['notebook_name'], True)
-    notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                             notebook_config['notebook_name']).get('Private')
-    notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    notebook_config['cluster_id'] = get_emr_id_by_name(notebook_config['cluster_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    if os.environ['application'] == 'deeplearning':
-        application = 'jupyter'
-    else:
-        application = os.environ['application']
+    try:
+        # generating variables dictionary
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        notebook_config = dict()
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+                os.environ['conf_service_base_name'][:20], '-', True)
+        notebook_config['notebook_name'] = os.environ['notebook_instance_name']
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(
+            notebook_config['service_base_name'],
+            notebook_config['project_name'],
+            notebook_config['endpoint_name']).lower().replace('_', '-')
+        notebook_config['cluster_name'] = dlab.meta_lib.get_not_configured_emr(notebook_config['tag_name'],
+                                                                               notebook_config['notebook_name'], True)
+        notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
+        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    os.environ['project_name'], os.environ['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        if os.environ['application'] == 'deeplearning':
+            application = 'jupyter'
+        else:
+            application = os.environ['application']
+    except Exception as err:
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} --emr_excluded_spark_properties {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+        params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} " \
+                 "--emr_excluded_spark_properties {} --project_name {} --os_user {}  --edge_hostname {} " \
+                 "--proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
             .format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['emr_version'],
                     notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['aws_region'],
                     os.environ['emr_excluded_spark_properties'], os.environ['project_name'],
@@ -77,17 +94,15 @@
                     os.environ['application'], os.environ['conf_pypi_mirror'])
         try:
             local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
-            remove_emr_tag(notebook_config['cluster_id'], ['State'])
-            tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+            dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+            dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+                                            os.environ['conf_tag_resource_id'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed installing EMR kernels.", str(err))
-        emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
-        terminate_emr(emr_id)
-        remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
-                       os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+        dlab.fab.append_result("Failed installing EMR kernels.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -101,17 +116,15 @@
                     os.environ['conf_os_user'])
         try:
             local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
-            remove_emr_tag(notebook_config['cluster_id'], ['State'])
-            tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+            dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+            dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+                                            os.environ['conf_tag_resource_id'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed to configure Spark.", str(err))
-        emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
-        terminate_emr(emr_id)
-        remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
-                       os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -121,6 +134,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
index 0cd06be..c80328b 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
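+# Roll back a partially provisioned Spark standalone cluster: remove the master
+# node and every slave node by their Name tags so no stray EC2 instances remain.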
+def clear_resources():
+    dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], slave_name)
 
 
 if __name__ == "__main__":
@@ -41,25 +50,27 @@
 
     try:
         # generating variables dictionary
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
+        if 'exploratory_name' in os.environ:
             notebook_config['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             notebook_config['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             notebook_config['computational_name'] = ''
-        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         notebook_config['region'] = os.environ['aws_region']
-        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
         notebook_config['project_name'] = os.environ['project_name']
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -67,21 +78,18 @@
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = get_instance_private_ip_address(
+            notebook_config['spark_master_ip'] = dlab.meta_lib.get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = get_instance_private_ip_address(
+            notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get ip address", str(err))
             sys.exit(1)
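+        # 7077 is the default Spark standalone master port.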
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
@@ -99,11 +107,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -123,11 +128,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(notebook_config['tag_name'], slave_name)
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -136,6 +138,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
index 7e5e0ef..5c481ac 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
 import argparse
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,92 +44,107 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
+    try:
+        # generating variables dictionary
+        dlab.actions_lib.create_aws_config_files()
+        notebook_config = dict()
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                              notebook_config['project_name'],
+                                                              notebook_config['endpoint_name'])
+        edge_status = dlab.meta_lib.get_instance_status(notebook_config['service_base_name'] + '-tag',
+                                                        notebook_config['edge_name'])
+        if edge_status != 'running':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            notebook_config['ssn_hostname'] = dlab.meta_lib.get_instance_hostname(
+                '{}-tag'.format(notebook_config['service_base_name']),
+                '{}-ssn'.format(notebook_config['service_base_name']))
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         notebook_config['ssn_hostname'])
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+        print('Generating infrastructure names and tags')
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            notebook_config['exploratory_name'] = ''
 
-    # generating variables dictionary
-    create_aws_config_files()
-    notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                          os.environ['project_name'], os.environ['endpoint_name'])
-    edge_status = get_instance_status(notebook_config['service_base_name'] + '-Tag', notebook_config['edge_name'])
-    if edge_status != 'running':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = get_instance_hostname(notebook_config['service_base_name'] + '-Tag', notebook_config['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
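+        # Deeplearning images bundle large frameworks, so they get a 30 (vs. 12,
+        # presumably GB) primary disk.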
+        notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+            os.environ['application'])
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
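+        # Prefer an explicitly requested notebook image; otherwise fall back to the
+        # expected (shared or project-scoped) image name built above.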
+        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+            os.environ['application'], os.environ['notebook_image_name']) if (x != 'None' and x != '')
+            else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
+        print('Searching pre-configured images')
+        notebook_config['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        image_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
+        if image_id != '':
+            notebook_config['ami_id'] = image_id
+            print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
+        else:
+            os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+            print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
+
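+        # Find the project subnet by tag; its CIDR is later resolved to a subnet id
+        # for the instance launch parameters.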
+        tag = {"Key": notebook_config['tag_name'],
+               "Value": "{}-{}-{}-subnet".format(notebook_config['service_base_name'], notebook_config['project_name'],
+                                                 notebook_config['endpoint_name'])}
+        notebook_config['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+
+        with open('/root/result.json', 'w') as f:
+            data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+            json.dump(data, f)
+
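+        # Merge the project and endpoint tags into any user-supplied additional tags.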
+        try:
+            os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                notebook_config['project_name'], notebook_config['endpoint_name'], os.environ['conf_additional_tags'])
+        except KeyError:
+            os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                notebook_config['project_name'], notebook_config['endpoint_name'])
+
+        print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
-    print('Generating infrastructure names and tags')
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                       os.environ['endpoint_name'],
-                                                                                       os.environ['application'])
-    notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
-                                                                                 os.environ['project_name'],
-                                                                                 os.environ['application'],
-                                                                                 os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
-        else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
-    print('Searching pre-configured images')
-    notebook_config['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-    image_id = get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
-    if image_id != '':
-        notebook_config['ami_id'] = image_id
-        print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
-    else:
-        os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
-    
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-
-    with open('/root/result.json', 'w') as f:
-        data = {"notebook_name": notebook_config['instance_name'], "error": ""}
-        json.dump(data, f)
-
-    try:
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
-    except KeyError:
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-
-    print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
 
     # launching instance for notebook server
     try:
         logging.info('[CREATE NOTEBOOK INSTANCE]')
         print('[CREATE NOTEBOOK INSTANCE]')
-        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} --instance_disk_size {} --primary_disk_size {}" \
-            .format(notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
-                    notebook_config['key_name'], get_security_group_by_name(notebook_config['security_group_name']),
-                    get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
-                    notebook_config['role_profile_name'],
-                    notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
-                    os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
+        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} " \
+                 "--iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} " \
+                 "--instance_disk_size {} --primary_disk_size {}" .format(
+                  notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
+                  notebook_config['key_name'],
+                  dlab.meta_lib.get_security_group_by_name(notebook_config['security_group_name']),
+                  dlab.meta_lib.get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+                  notebook_config['role_profile_name'],
+                  notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
+                  os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
 
@@ -135,6 +152,6 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed to create instance.", str(err))
+        dlab.fab.append_result("Failed to create instance.", str(err))
         sys.exit(1)
 
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
index 9a60aa2..d153082 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,13 +42,12 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
 
     try:
         logging.info('[START NOTEBOOK]')
@@ -54,10 +55,10 @@
         params = "--tag_name {} --nb_tag_value {}".format(notebook_config['tag_name'], notebook_config['notebook_name'])
         try:
             print("Starting notebook")
-            start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
+            dlab.actions_lib.start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
+            dlab.fab.append_result("Failed to start notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -65,8 +66,8 @@
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                                 notebook_config['notebook_name']).get('Private')
+        notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -74,7 +75,7 @@
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
+            dlab.fab.append_result("Failed to setup git credentials.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -88,15 +89,15 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
-        ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
-        dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['notebook_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -110,8 +111,8 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
 
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
index dc61a7a..679d4eb 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
@@ -24,13 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+import traceback
 import os
 import uuid
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import boto3
 import argparse
 import sys
@@ -39,7 +39,7 @@
 def stop_notebook(nb_tag_value, bucket_name, tag_name, ssh_user, key_path):
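+    # Stop sequence: terminate the notebook's EMR clusters and clean their bucket
+    # configs and kernels, stop any standalone data engine nodes, then the notebook.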
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(nb_tag_value, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 computational_name = ''
@@ -51,11 +51,12 @@
                 for tag in cluster.get('Tags'):
                     if tag.get('Key') == 'ComputationalName':
                         computational_name = tag.get('Value')
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
-                remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version, computational_name)
+                dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version,
+                                                computational_name)
                 print("{} kernels have been removed from notebook successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
@@ -66,22 +67,22 @@
     try:
         cluster_list = []
         master_ids = []
-        cluster_instances_list = get_ec2_list('dataengine_notebook_name', nb_tag_value)
+        cluster_instances_list = dlab.meta_lib.get_ec2_list('dataengine_notebook_name', nb_tag_value)
         for instance in cluster_instances_list:
             for tag in instance.tags:
                 if tag['Key'] == 'Type' and tag['Value'] == 'master':
                     master_ids.append(instance.id)
         for id in master_ids:
-            for tag in get_instance_attr(id, 'tags'):
+            for tag in dlab.meta_lib.get_instance_attr(id, 'tags'):
                 if tag['Key'] == 'Name':
                     cluster_list.append(tag['Value'].replace(' ', '')[:-2])
-        stop_ec2('dataengine_notebook_name', nb_tag_value)
+        dlab.actions_lib.stop_ec2('dataengine_notebook_name', nb_tag_value)
     except:
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        stop_ec2(tag_name, nb_tag_value)
+        dlab.actions_lib.stop_ec2(tag_name, nb_tag_value)
     except:
         sys.exit(1)
 
@@ -95,18 +96,18 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['project_name'] = os.environ['project_name']
     notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+    notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                   notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+                                                                  notebook_config['endpoint_name']
+                                                                 ).lower().replace('_', '-')
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
     notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
 
     logging.info('[STOP NOTEBOOK]')
@@ -116,7 +117,7 @@
                       os.environ['conf_os_user'], notebook_config['key_path'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
 
@@ -128,7 +129,7 @@
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
index caeaf70..c199089 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
@@ -24,17 +24,19 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
+import boto3
 import uuid
 
 
 def terminate_nb(nb_tag_value, bucket_name, tag_name):
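+    # Terminate every EMR cluster bound to the notebook and wipe its configs from
+    # the project bucket before removing the data engine and notebook instances.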
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(nb_tag_value, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
@@ -42,10 +44,10 @@
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
                 print('Cleaning bucket from configs for cluster {}'.format(emr_name))
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
                 print('Terminating cluster {}'.format(emr_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
@@ -54,13 +56,13 @@
 
     print("Terminating data engine cluster")
     try:
-        remove_ec2('dataengine_notebook_name', nb_tag_value)
+        dlab.actions_lib.remove_ec2('dataengine_notebook_name', nb_tag_value)
     except:
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        remove_ec2(tag_name, nb_tag_value)
+        dlab.actions_lib.remove_ec2(tag_name, nb_tag_value)
     except:
         sys.exit(1)
 
@@ -73,18 +75,18 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['project_name'] = os.environ['project_name']
     notebook_config['endpoint_name'] = os.environ['endpoint_name']
-    notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+    notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                   notebook_config['project_name'],
-                                                                  notebook_config['endpoint_name'])).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+                                                                  notebook_config['endpoint_name']
+                                                                 ).lower().replace('_', '-')
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
 
     try:
         logging.info('[TERMINATE NOTEBOOK]')
@@ -93,7 +95,7 @@
             terminate_nb(notebook_config['notebook_name'], notebook_config['bucket_name'], notebook_config['tag_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -106,6 +108,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
index ce76a1e..3da4f63 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
@@ -21,22 +21,23 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
+import os
 
 
 if __name__ == "__main__":
     try:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         image_conf = dict()
         image_conf['full_image_name'] = os.environ['notebook_image_name']
 
-        image_id = get_ami_id_by_name(image_conf['full_image_name'], 'available')
+        image_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'], 'available')
         if image_id != '':
-            deregister_image(image_conf['full_image_name'])
+            dlab.actions_lib.deregister_image(image_conf['full_image_name'])
 
             with open("/root/result.json", 'w') as result:
                 res = {"notebook_image_name": image_conf['full_image_name'],
@@ -44,5 +45,5 @@
                        "Action": "Delete existing notebook image"}
                 result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to delete existing notebook image", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
index 20206de..9e9fb40 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import logging
@@ -53,9 +54,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create dlab ssh user.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to create dlab ssh user.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     # configuring proxy on Data Engine service
@@ -72,27 +72,27 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE DATAENGINE SERVICE]')
         print('[CONFIGURE DATAENGINE SERVICE]')
         try:
-            configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'], emr_conf['key_path'])
+            dlab.fab.configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'],
+                                                       emr_conf['key_path'])
             env['connection_attempts'] = 100
             env.key_filename = emr_conf['key_path']
             env.host_string = emr_conf['os_user'] + '@' + emr_conf['instance_ip']
-            sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> /etc/yum/pluginconf.d/priorities.conf')
+            sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> '
+                 '/etc/yum/pluginconf.d/priorities.conf')
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure dataengine service.", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
 
@@ -130,12 +130,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed edge reverse proxy template", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed edge reverse proxy template", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
     try:
@@ -150,9 +149,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key", str(err))
-        terminate_emr(emr_conf['cluster_id'])
+        dlab.fab.append_result("Failed installing users key", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
         sys.exit(1)
 
 
@@ -163,75 +161,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.INFO,
                         filename=local_log_filepath)
+
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    emr_conf = dict()
-    try:
-        emr_conf['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        emr_conf['exploratory_name'] = ''
-    try:
-        emr_conf['computational_name'] = os.environ['computational_name']
-    except:
-        emr_conf['computational_name'] = ''
-    emr_conf['apps'] = 'Hadoop Hive Hue Spark'
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    emr_conf['project_name'] = os.environ['project_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
-    emr_conf['key_name'] = os.environ['conf_key_name']
-    emr_conf['region'] = os.environ['aws_region']
-    emr_conf['release_label'] = os.environ['emr_version']
-    emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
-    emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
-    emr_conf['instance_count'] = os.environ['emr_instance_count']
-    emr_conf['notebook_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                      os.environ['notebook_instance_name']).get('Private')
-    emr_conf['network_type'] = os.environ['conf_network_type']
-    emr_conf['role_service_name'] = os.environ['emr_service_role']
-    emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
-    emr_conf['tags'] = 'Name=' + emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
-                       emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + args.uuid + \
-                       ', ' + emr_conf['service_base_name'] + '-Tag=' + emr_conf['service_base_name'] + '-' + \
-                       os.environ['project_name'] + '-des-' + emr_conf['exploratory_name'] + '-' + \
-                       emr_conf['computational_name'] + '-' + args.uuid + \
-                       ', Notebook=' + os.environ['notebook_instance_name'] + ', State=not-configured, Endpoint_tag=' + emr_conf['endpoint_name']
-    emr_conf['cluster_name'] = emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
-                               emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + \
-                               args.uuid
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
-    tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']), "Value": "{}-{}-subnet".format(
-        emr_conf['service_base_name'], os.environ['project_name'])}
-    emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
-    emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    emr_conf['all_ip_cidr'] = '0.0.0.0/0'
-    emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
-                                                                          os.environ['project_name'])
-    emr_conf['vpc_id'] = os.environ['aws_vpc_id']
-    emr_conf['cluster_id'] = get_emr_id_by_name(emr_conf['cluster_name'])
-    emr_conf['cluster_instances'] = get_emr_instances_list(emr_conf['cluster_id'])
-    emr_conf['cluster_master_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
-    emr_conf['cluster_core_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
-    emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
-                                                               emr_conf['project_name'], emr_conf['endpoint_name'])
-    emr_conf['edge_instance_hostname'] = get_instance_private_ip_address(emr_conf['tag_name'],
-                                                                         emr_conf['edge_instance_name'])
-    if emr_conf['network_type'] == 'private':
-        emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                               emr_conf['edge_instance_name']).get('Private')
-    else:
-        emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
-                                                               emr_conf['edge_instance_name']).get('Public')
-    emr_conf['user_keyname'] = os.environ['project_name']
-    emr_conf['os_user'] = os.environ['conf_os_user']
-    emr_conf['initial_user'] = 'ec2-user'
-    emr_conf['sudo_group'] = 'wheel'
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        emr_conf = dict()
+        if 'exploratory_name' in os.environ:
+            emr_conf['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            emr_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            emr_conf['computational_name'] = os.environ['computational_name']
+        else:
+            emr_conf['computational_name'] = ''
+        emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+        emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+        emr_conf['project_name'] = os.environ['project_name']
+        emr_conf['endpoint_name'] = os.environ['endpoint_name']
+        emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
+        emr_conf['key_name'] = os.environ['conf_key_name']
+        emr_conf['region'] = os.environ['aws_region']
+        emr_conf['release_label'] = os.environ['emr_version']
+        emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+        emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+        emr_conf['instance_count'] = os.environ['emr_instance_count']
+        emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        emr_conf['network_type'] = os.environ['conf_network_type']
+        emr_conf['role_service_name'] = os.environ['emr_service_role']
+        emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
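+        # The Name and {SBN}-tag values mirror cluster_name (built just below), so
+        # both are composed from the computational name and the request uuid.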
+        emr_conf['tags'] = "Name={0}-{1}-{2}-des-{3}-{4}," \
+                           "{0}-tag={0}-{1}-{2}-des-{3}-{4}," \
+                           "Notebook={5}," \
+                           "State=not-configured," \
+                           "Endpoint_tag={2}".format(
+            emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'],
+            emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'],
+            emr_conf['computational_name'], args.uuid, os.environ['notebook_instance_name'])
+        emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}' \
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['endpoint_name'],
+                    emr_conf['computational_name'],
+                    args.uuid)
+        emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                               emr_conf['endpoint_name']).lower().replace('_', '-')
+        tag = {"Key": "{}-tag".format(emr_conf['service_base_name']), "Value": "{}-{}-{}-subnet".format(
+            emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])}
+        emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        emr_conf['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'],
+                                                  os.environ['conf_key_name'])
+        emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+        emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
+                                                                                   emr_conf['project_name'],
+                                                                                   emr_conf['endpoint_name'])
+        emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+        emr_conf['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+        emr_conf['cluster_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'])
+        emr_conf['cluster_master_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
+        emr_conf['cluster_core_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
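+        # Resolve the edge node; its hostname fronts the reverse-proxied cluster UIs.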
+        emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+                                                                   emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(emr_conf['tag_name'],
+                                                                                 emr_conf['edge_instance_name'])
+        emr_conf['user_keyname'] = emr_conf['project_name']
+        emr_conf['os_user'] = os.environ['conf_os_user']
+        emr_conf['initial_user'] = 'ec2-user'
+        emr_conf['sudo_group'] = 'wheel'
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+        sys.exit(1)
 
     try:
         jobs = []
@@ -252,14 +253,14 @@
         logging.info('[SUMMARY]')
         ip_address = emr_conf['cluster_master_instances'][0].get('PrivateIpAddress')
         emr_master_url = "http://" + ip_address + ":8088"
-        emr_master_acces_url = "https://" + emr_conf['edge_instance_ip'] + "/{}/".format(emr_conf['exploratory_name'] +
-                                                                                         '_' +
-                                                                                         emr_conf['computational_name'])
+        emr_master_acces_url = "https://{}/{}_{}/".format(emr_conf['edge_instance_hostname'],
+                                                          emr_conf['exploratory_name'],
+                                                          emr_conf['computational_name'])
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
         print("Service base name: {}".format(emr_conf['service_base_name']))
         print("Cluster name: {}".format(emr_conf['cluster_name']))
-        print("Cluster id: {}".format(get_emr_id_by_name(emr_conf['cluster_name'])))
+        print("Cluster id: {}".format(dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])))
         print("Key name: {}".format(emr_conf['key_name']))
         print("Region: {}".format(emr_conf['region']))
         print("EMR version: {}".format(emr_conf['release_label']))
@@ -270,7 +271,7 @@
         print("Bucket name: {}".format(emr_conf['bucket_name']))
         with open("/root/result.json", 'w') as result:
             res = {"hostname": emr_conf['cluster_name'],
-                   "instance_id": get_emr_id_by_name(emr_conf['cluster_name']),
+                   "instance_id": dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']),
                    "key_name": emr_conf['key_name'],
                    "user_own_bucket_name": emr_conf['bucket_name'],
                    "Action": "Create new EMR cluster",
@@ -282,8 +283,7 @@
                    ]}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
-    sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
index 0f1f510..7dd94d9 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import argparse
 import sys
 import os
@@ -46,119 +47,111 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    if os.path.exists('/response/.emr_creating_{}'.format(os.environ['exploratory_name'])):
-        time.sleep(30)
-    create_aws_config_files()
-    emr_conf = dict()
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    emr_conf['project_name'] = os.environ['project_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    edge_status = get_instance_status(emr_conf['service_base_name'] + '-Tag', '{0}-{1}-{2}-edge'
-                                      .format(emr_conf['service_base_name'],
-                                              emr_conf['project_name'],
-                                              emr_conf['endpoint_name']))
-    if edge_status != 'running':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = get_instance_hostname(
-            emr_conf['service_base_name'] + '-Tag',
-            emr_conf['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable',
-                            os.environ['ssn_dlab_path'],
-                            os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        emr_conf = dict()
+        if 'exploratory_name' in os.environ:
+            emr_conf['exploratory_name'] = os.environ['exploratory_name']
+        else:
+            emr_conf['exploratory_name'] = ''
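+        # A marker file signals that another EMR cluster for this notebook is still being created
+        # (it is touched below, right after emr_waiter); back off briefly if one is in flight.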
+        if os.path.exists('/response/.emr_creating_{}'.format(emr_conf['exploratory_name'])):
+            time.sleep(30)
+        dlab.actions_lib.create_aws_config_files()
+        emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+        emr_conf['project_name'] = os.environ['project_name']
+        emr_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_status = dlab.meta_lib.get_instance_status(
+            '{}-tag'.format(emr_conf['service_base_name']),
+            '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                      emr_conf['endpoint_name']))
+        if edge_status != 'running':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = dlab.meta_lib.get_instance_hostname(
+                emr_conf['service_base_name'] + '-tag',
+                emr_conf['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable',
+                                         os.environ['ssn_dlab_path'],
+                                         os.environ['conf_os_user'], ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+        print('Generating infrastructure names and tags')
+        if 'computational_name' in os.environ:
+            emr_conf['computational_name'] = os.environ['computational_name']
+        else:
+            emr_conf['computational_name'] = ''
+        emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+        emr_conf['tag_name'] = '{0}-tag'.format(emr_conf['service_base_name'])
+        emr_conf['key_name'] = os.environ['conf_key_name']
+        emr_conf['endpoint_tag'] = emr_conf['endpoint_name']
+        emr_conf['project_tag'] = emr_conf['project_name']
+        emr_conf['region'] = os.environ['aws_region']
+        emr_conf['release_label'] = os.environ['emr_version']
+        emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+                                                                   emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
+        emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+        emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+        emr_conf['instance_count'] = os.environ['emr_instance_count']
+        emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        emr_conf['role_service_name'] = os.environ['emr_service_role']
+        emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
+        emr_conf['tags'] = 'Name={0}-{1}-{5}-des-{3},' \
+                           '{0}-tag={0}-{1}-{5}-des-{3},' \
+                           'Notebook={4},' \
+                           'State=not-configured,' \
+                           'ComputationalName={3},' \
+                           'Endpoint_tag={5}'\
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['exploratory_name'],
+                    emr_conf['computational_name'],
+                    os.environ['notebook_instance_name'],
+                    emr_conf['endpoint_name'])
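+        # Comma-separated Key=Value tag pairs for the cluster; positional argument {2}
+        # (exploratory_name) is passed to format() but not referenced in the template.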
+        emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}'\
+            .format(emr_conf['service_base_name'],
+                    emr_conf['project_name'],
+                    emr_conf['endpoint_name'],
+                    emr_conf['computational_name'],
+                    args.uuid)
+        emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                               emr_conf['endpoint_name']).lower().replace('_', '-')
+        emr_conf['configurations'] = '[]'
+        if 'emr_configurations' in os.environ:
+            emr_conf['configurations'] = os.environ['emr_configurations']
+
+        tag = {"Key": "{}-tag".format(emr_conf['service_base_name']),
+               "Value": "{}-{}-{}-subnet".format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                 emr_conf['endpoint_name'])}
+        emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+        emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'\
+            .format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])
+        emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+        emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
+        emr_conf['provision_instance_ip'] = None
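+        # Prefer the endpoint instance's private IP for the provisioning CIDR; the except branch
+        # falls back to the SSN node when no endpoint instance can be found.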
+        try:
+            emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+                emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
+                                                                emr_conf['endpoint_name'])).get('Private') + "/32"
+        except Exception:
+            emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+                emr_conf['tag_name'], '{0}-ssn'.format(emr_conf['service_base_name'])).get('Private') + "/32"
+        if os.environ['emr_slave_instance_spot'] == 'True':
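+            # The spot bid is computed as a percentage (emr_slave_instance_spot_pct_price) of the on-demand price.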
+            ondemand_price = float(dlab.meta_lib.get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
+            emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
+        else:
+            emr_conf['slave_bid_price'] = 0
+        if 'emr_timeout' in os.environ:
+            emr_conf['emr_timeout'] = os.environ['emr_timeout']
+        else:
+            emr_conf['emr_timeout'] = "1200"
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
         sys.exit(1)
-    print('Generating infrastructure names and tags')
-    try:
-        emr_conf['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        emr_conf['exploratory_name'] = ''
-    try:
-        emr_conf['computational_name'] = os.environ['computational_name']
-    except:
-        emr_conf['computational_name'] = ''
-    emr_conf['apps'] = 'Hadoop Hive Hue Spark'
 
-    emr_conf['tag_name'] = '{0}-Tag'.format(emr_conf['service_base_name'])
-    emr_conf['key_name'] = os.environ['conf_key_name']
-    emr_conf['endpoint_tag'] = os.environ['endpoint_name']
-    emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['project_tag'] = os.environ['project_name']
-    emr_conf['region'] = os.environ['aws_region']
-    emr_conf['release_label'] = os.environ['emr_version']
-    emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
-                                                               os.environ['project_name'], emr_conf['endpoint_tag'])
-    emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
-    emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
-    emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
-    emr_conf['instance_count'] = os.environ['emr_instance_count']
-    emr_conf['notebook_ip'] = get_instance_ip_address(
-        emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
-    emr_conf['role_service_name'] = os.environ['emr_service_role']
-    emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
-    emr_conf['tags'] = 'Name={0}-{1}-des-{2}-{3},' \
-                       '{0}-Tag={0}-{1}-des-{2}-{3},' \
-                       'Notebook={4},' \
-                       'State=not-configured,' \
-                       'ComputationalName={3},' \
-                       'Endpoint_tag={5}'\
-        .format(emr_conf['service_base_name'],
-                os.environ['project_name'],
-                emr_conf['exploratory_name'],
-                emr_conf['computational_name'],
-                os.environ['notebook_instance_name'],
-                emr_conf['endpoint_name'])
-    emr_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}-{4}'\
-        .format(emr_conf['service_base_name'],
-                os.environ['project_name'],
-                emr_conf['exploratory_name'],
-                emr_conf['computational_name'],
-                args.uuid)
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
-    emr_conf['configurations'] = '[]'
-    if 'emr_configurations' in os.environ:
-        emr_conf['configurations'] = os.environ['emr_configurations']
-
-    tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']),
-           "Value": "{}-{}-subnet".format(emr_conf['service_base_name'],
-                                          os.environ['project_name'])}
-    emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
-    emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    emr_conf['all_ip_cidr'] = '0.0.0.0/0'
-    emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'\
-        .format(emr_conf['service_base_name'], os.environ['project_name'])
-    emr_conf['vpc_id'] = os.environ['aws_vpc_id']
-    emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
-    emr_conf['provision_instance_ip'] = None
-    try:
-        emr_conf['provision_instance_ip'] = get_instance_ip_address(
-            emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
-                                                            os.environ['endpoint_name'])).get('Private') + "/32"
-    except:
-        emr_conf['provision_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'], '{0}-ssn'.format(
-            emr_conf['service_base_name'])).get('Private') + "/32"
-    if os.environ['emr_slave_instance_spot'] == 'True':
-        ondemand_price = float(get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
-        emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
-    else:
-        emr_conf['slave_bid_price'] = 0
-
-    try:
-        emr_conf['emr_timeout'] = os.environ['emr_timeout']
-    except:
-        emr_conf['emr_timeout'] = "1200"
-
-    print("Will create exploratory environment with edge node "
-          "as access point as following: {}".
-          format(json.dumps(emr_conf,
-                            sort_keys=True,
-                            indent=4,
-                            separators=(',', ': '))))
+    print("Will create exploratory environment with edge node as access point as following: {}".format(
+        json.dumps(emr_conf, sort_keys=True, indent=4, separators=(',', ': '))))
     logging.info(json.dumps(emr_conf))
 
     with open('/root/result.json', 'w') as f:
@@ -166,11 +159,11 @@
         json.dump(data, f)
 
     try:
-        emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
-        local('touch /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+        dlab.meta_lib.emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
+        local('touch /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
     except Exception as err:
         traceback.print_exc()
-        append_result("EMR waiter fail.", str(err))
+        dlab.fab.append_result("EMR waiter fail.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -180,8 +173,8 @@
     logging.info('[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]')
     print("[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]")
     try:
-        edge_group_id = check_security_group(emr_conf['edge_security_group_name'])
-        cluster_sg_ingress = format_sg([
+        edge_group_id = dlab.meta_lib.check_security_group(emr_conf['edge_security_group_name'])
+        cluster_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -201,7 +194,7 @@
                 "PrefixListIds": []
             }
         ])
-        cluster_sg_egress = format_sg([
+        cluster_sg_egress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -245,18 +238,18 @@
                    emr_conf['cluster_name'], True)
         try:
             if 'conf_additional_tags' in os.environ:
-                os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                    emr_conf['project_tag'], emr_conf['endpoint_tag'])
+                os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                    emr_conf['project_tag'], emr_conf['endpoint_tag'], os.environ['conf_additional_tags'])
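+                # Existing user-supplied tags are preserved; project and endpoint tags are appended after them.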
             else:
-                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'], emr_conf['endpoint_tag'])
+                os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'],
+                                                                                               emr_conf['endpoint_tag'])
             print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create sg.", str(err))
+        dlab.fab.append_result("Failed to create sg.", str(err))
         sys.exit(1)
 
     local("echo Waiting for changes to propagate; sleep 10")
@@ -304,7 +297,7 @@
                     emr_conf['region'],
                     emr_conf['tags'],
                     os.environ['conf_key_dir'],
-                    os.environ['project_name'],
+                    emr_conf['project_name'],
                     os.environ['emr_slave_instance_spot'],
                     str(emr_conf['slave_bid_price']),
                     emr_conf['service_base_name'],
@@ -315,14 +308,12 @@
         except:
             traceback.print_exc()
             raise Exception
-
         cluster_name = emr_conf['cluster_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], emr_conf['key_name'])
-        local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create EMR Cluster.", str(err))
-        local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
-        emr_id = get_emr_id_by_name(emr_conf['cluster_name'])
-        terminate_emr(emr_id)
+        dlab.fab.append_result("Failed to create EMR Cluster.", str(err))
+        local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
+        emr_id = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+        dlab.actions_lib.terminate_emr(emr_id)
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
index 05f2e1f..e9551e3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
@@ -21,18 +21,22 @@
 #
 # ******************************************************************************
 
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import boto3
+import logging
 import argparse
 import sys
 import os
+import traceback
+import json
 
 
 def terminate_emr_cluster(emr_name, bucket_name, tag_name, nb_tag_value, ssh_user, key_path):
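+    # Terminates every EMR cluster matching emr_name, cleans its configs from the S3 bucket,
+    # and removes the corresponding kernels from the notebook instance.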
     print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
     try:
-        clusters_list = get_emr_list(emr_name, 'Value')
+        clusters_list = dlab.meta_lib.get_emr_list(emr_name, 'Value')
         if clusters_list:
             for cluster_id in clusters_list:
                 computational_name = ''
@@ -44,13 +48,13 @@
                 for tag in cluster.get('Tags'):
                     if tag.get('Key') == 'ComputationalName':
                         computational_name = tag.get('Value')
-                s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+                dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
                 print("The bucket {} has been cleaned successfully".format(bucket_name))
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
                 print("Removing EMR kernels from notebook")
-                remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
-                               emr_version, computational_name)
+                # NOTE: assuming the emr_version environment variable is intended here; a bare
+                # emr_version name is not defined in this scope.
+                dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
+                                                os.environ['emr_version'], computational_name)
         else:
             print("There are no EMR clusters to terminate.")
     except:
@@ -66,19 +70,18 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     emr_conf = dict()
-    emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    emr_conf['service_base_name'] = os.environ['conf_service_base_name']
     emr_conf['emr_name'] = os.environ['emr_cluster_name']
     emr_conf['notebook_name'] = os.environ['notebook_instance_name']
     emr_conf['project_name'] = os.environ['project_name']
     emr_conf['endpoint_name'] = os.environ['endpoint_name']
-    emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
-                                                           emr_conf['endpoint_name'])).lower().replace('_', '-')
+    emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+                                                           emr_conf['endpoint_name']).lower().replace('_', '-')
     emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
-    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
+    emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
 
     try:
         logging.info('[TERMINATE EMR CLUSTER]')
@@ -88,7 +91,7 @@
                                   emr_conf['notebook_name'], os.environ['conf_os_user'], emr_conf['key_path'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate EMR cluster.", str(err))
+            dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -101,6 +104,6 @@
                    "Action": "Terminate EMR cluster"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
index 224b0dc..e0a4f0c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,13 +38,13 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = get_instance_private_ip_address(data_engine['tag_name'], slave_name)
+    slave_hostname = dlab.meta_lib.get_instance_private_ip_address(data_engine['tag_name'], slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
-             data_engine['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            slave_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+            data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -51,12 +52,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -70,12 +67,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to clean slave instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean slave instance.", str(err))
         sys.exit(1)
 
     try:
@@ -91,12 +84,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -111,18 +100,15 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
         print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
-        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
+        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+                 "--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
             format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
                    os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -133,12 +119,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
     try:
@@ -153,15 +135,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed install users key on slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed install users key on slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
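+    # Roll back on failure: remove the master node and all slaves (instance_count includes the master).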
+    dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -173,81 +158,77 @@
     try:
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        try:
+        if 'exploratory_name' in os.environ:
             data_engine['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             data_engine['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             data_engine['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             data_engine['computational_name'] = ''
-        data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['service_base_name'] = os.environ['conf_service_base_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['region'] = os.environ['aws_region']
         data_engine['network_type'] = os.environ['conf_network_type']
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
         data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
         data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
-        data_engine['dataengine_master_security_group_name'] = data_engine['service_base_name'] + '-' + \
-                                                               os.environ['project_name'] + '-dataengine-master-sg'
-        data_engine['dataengine_slave_security_group_name'] = data_engine['service_base_name'] + '-' + \
-                                                              os.environ['project_name'] + '-dataengine-slave-sg'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         tag = {"Key": data_engine['tag_name'],
-               "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
-        data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
-        data_engine['notebook_dataengine_role_profile_name'] = data_engine['service_base_name']. \
-                                                                   lower().replace('-', '_') + "-" + \
-                                                               os.environ['project_name'] + "-" + os.environ['endpoint_name'] + '-nb-de-Profile'
+               "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+                                                 data_engine['endpoint_name'])}
+        data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
-        master_node_hostname = get_instance_hostname(data_engine['tag_name'], data_engine['master_node_name'])
+        master_node_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'],
+                                                                   data_engine['master_node_name'])
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
-        data_engine['user_keyname'] = os.environ['project_name']
+        data_engine['user_keyname'] = data_engine['project_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        data_engine['project_name'] = os.environ['project_name']
-        data_engine['endpoint_name'] = os.environ['endpoint_name']
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'], data_engine['endpoint_name'])
-        edge_instance_hostname = get_instance_hostname(data_engine['tag_name'], edge_instance_name)
-        edge_instance_private_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
-        if data_engine['network_type'] == 'private':
-            edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
-        else:
-            edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Public')
-
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        data_engine['edge_instance_hostname'] = edge_instance_hostname
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            data_engine['initial_user'] = 'ubuntu'
+            data_engine['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            data_engine['initial_user'] = 'ec2-user'
+            data_engine['sudo_group'] = 'wheel'
     except Exception as err:
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(int(os.environ['dataengine_instance_count']) - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
         print('[CREATING DLAB SSH USER ON MASTER NODE]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
-             data_engine['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+            data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -255,12 +236,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
@@ -274,12 +251,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to clean master instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -295,12 +268,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -315,12 +284,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -335,18 +300,15 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
-        append_result("Failed install users key on master node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed install users key on master node.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE MASTER NODE]')
         print('[CONFIGURE MASTER NODE]')
-        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
+        params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+                 "--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
             format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
                    os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -357,12 +319,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -377,17 +335,15 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure slave nodes.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = get_instance_private_ip_address('Name', os.environ['notebook_instance_name'])
+        notebook_instance_ip = dlab.meta_lib.get_instance_private_ip_address('Name',
+                                                                             os.environ['notebook_instance_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -412,22 +368,20 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            remove_ec2(data_engine['tag_name'], slave_name)
+        dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = get_instance_ip_address(data_engine['tag_name'],
-                                             data_engine['master_node_name']).get('Private')
+        ip_address = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+                                                           data_engine['master_node_name']).get('Private')
         spark_master_url = "http://" + ip_address + ":8080"
-        spark_master_access_url = "https://" + edge_instance_ip + "/{}/".format(data_engine['exploratory_name'] +
-                                                                               '_' + data_engine['computational_name'])
+        spark_master_access_url = "https://{}/{}_{}/".format(data_engine['edge_instance_hostname'],
+                                                             data_engine['exploratory_name'],
+                                                             data_engine['computational_name'])
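+        # The Spark master UI is exposed through the edge node's reverse proxy at /<exploratory>_<computational>/.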
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
         print("Service base name: {}".format(data_engine['service_base_name']))
@@ -438,7 +392,8 @@
         print("Instance count: {}".format(str(data_engine['instance_count'])))
         with open("/root/result.json", 'w') as result:
             res = {"hostname": data_engine['cluster_name'],
-                   "instance_id": get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name']),
+                   "instance_id": dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+                                                                     data_engine['master_node_name']),
                    "key_name": data_engine['key_name'],
                    "Action": "Create new Data Engine",
                    "computational_url": [
@@ -449,6 +404,7 @@
                    ]}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
index 62b6a95..ad19f7a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -43,84 +44,85 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
         data_engine = dict()
-        data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        edge_status = get_instance_status(data_engine['service_base_name'] + '-Tag',
-                                          '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
-                                                                    os.environ['project_name'],
-                                                                    os.environ['endpoint_name']))
+        data_engine['service_base_name'] = os.environ['conf_service_base_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        edge_status = dlab.meta_lib.get_instance_status(
+            data_engine['service_base_name'] + '-tag', '{0}-{1}-{2}-edge'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name']))
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = get_instance_hostname(data_engine['service_base_name'] + '-Tag',
-                                                 data_engine['service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            ssn_hostname = dlab.meta_lib.get_instance_hostname(data_engine['service_base_name'] + '-tag',
+                                                               data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
         print('Generating infrastructure names and tags')
-
-        try:
+        if 'exploratory_name' in os.environ:
             data_engine['exploratory_name'] = os.environ['exploratory_name']
-        except:
+        else:
             data_engine['exploratory_name'] = ''
-        try:
+        if 'computational_name' in os.environ:
             data_engine['computational_name'] = os.environ['computational_name']
-        except:
+        else:
             data_engine['computational_name'] = ''
-
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['region'] = os.environ['aws_region']
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
         data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
         data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
-        data_engine['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
-            .format(data_engine['service_base_name'], os.environ['project_name'])
-        data_engine['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
-            .format(data_engine['service_base_name'], os.environ['project_name'])
-        data_engine['tag_name'] = '{}-Tag'.format(data_engine['service_base_name'])
+        data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['tag_name'] = '{}-tag'.format(data_engine['service_base_name'])
         tag = {"Key": data_engine['tag_name'],
-               "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
-        data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
-        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-            .format(data_engine['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
+               "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+                                                 data_engine['endpoint_name'])}
+        data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+            .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
         data_engine['cluster_nodes_tag'] = {"Key": "dataengine_notebook_name",
                                             "Value": os.environ['notebook_instance_name']}
         data_engine['cluster_nodes_resource_tag'] = {"Key": os.environ['conf_tag_resource_id'],
-                                                     "Value": data_engine['service_base_name'] + ':' +
-                                                              data_engine['cluster_name']}
+                                                     "Value": "{}:{}".format(data_engine['service_base_name'],
+                                                                             data_engine['cluster_name'])}
         data_engine['cluster_nodes_billing_tag'] = {"Key": os.environ['conf_billing_tag_key'],
-                                                     "Value": os.environ['conf_billing_tag_value']}
+                                                    "Value": os.environ['conf_billing_tag_value']}
         data_engine['primary_disk_size'] = '30'
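+        # primary_disk_size is passed to common_create_instance as the node's primary EBS volume size (GB).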
         data_engine['instance_class'] = 'dataengine'
 
         if os.environ['conf_shared_image_enabled'] == 'false':
-            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
+            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+                os.environ['application'])
         else:
             data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['application'])
+                                                                                     data_engine['endpoint_name'],
+                                                                                     os.environ['application'])
         data_engine['notebook_image_name'] = (
-            lambda x: '{0}-{1}-{2}-{3}'.format(data_engine['service_base_name'],
-                                               os.environ['project_name'],
-                                               os.environ['application'],
-                                               os.environ['notebook_image_name'].lower().replace('_', '-')) if (
+            lambda x: '{0}-{1}-{4}-{2}-{3}'.format(data_engine['service_base_name'],
+                                                   data_engine['project_name'],
+                                                   os.environ['application'],
+                                                   os.environ['notebook_image_name'],
+                                                   data_engine['endpoint_name']) if (
                     x != 'None' and x != '')
             else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
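+        # The lambda above selects the user-supplied notebook_image_name when it is set and not 'None',
+        # otherwise the expected_image_name default computed earlier.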
         print('Searching pre-configured images')
-        data_engine['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-        image_id = get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
+        data_engine['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        image_id = dlab.meta_lib.get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
         if image_id != '' and os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
             data_engine['ami_id'] = image_id
             print('Pre-configured image found. Using: {}'.format(data_engine['ami_id']))
@@ -129,8 +131,7 @@
             print('No pre-configured image found. Using default one: {}'.format(data_engine['ami_id']))
 
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -138,35 +139,39 @@
         json.dump(data, f)
 
     try:
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+        os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+            data_engine['project_name'], data_engine['endpoint_name'], os.environ['conf_additional_tags'])
     except KeyError:
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
+        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(data_engine['project_name'],
+                                                                                       data_engine['endpoint_name'])
     print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
 
     try:
         logging.info('[CREATE MASTER NODE]')
         print('[CREATE MASTER NODE]')
         data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "master"}
-        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+        params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+                 "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} " \
+                 "--instance_class {}" \
             .format(data_engine['master_node_name'], data_engine['ami_id'], data_engine['master_size'],
                     data_engine['key_name'],
-                    get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
-                    get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+                    dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
+                    dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
                     data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
                     data_engine['master_node_name'], data_engine['primary_disk_size'], data_engine['instance_class'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
-            data_engine['master_id'] = get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name'])
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
-            create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
+            data_engine['master_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+                                                                          data_engine['master_node_name'])
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
+            dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create master instance.", str(err))
+        dlab.fab.append_result("Failed to create master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -175,31 +180,32 @@
             print('[CREATE SLAVE NODE {}]'.format(i + 1))
             slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
             data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "slave"}
-            params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+            params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+                     "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} " \
+                     "--primary_disk_size {} --instance_class {}" \
                 .format(slave_name, data_engine['ami_id'], data_engine['slave_size'],
                         data_engine['key_name'],
-                        get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
-                        get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+                        dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
+                        dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
                         data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
                         slave_name, data_engine['primary_disk_size'], data_engine['instance_class'])
             try:
                 local("~/scripts/{}.py {}".format('common_create_instance', params))
-                data_engine['slave_id'] = get_instance_by_name(data_engine['tag_name'], slave_name)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
-                create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
+                data_engine['slave_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'], slave_name)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
+                dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
             except:
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+        dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                remove_ec2(data_engine['tag_name'], slave_name)
+                dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        append_result("Failed to create slave instances.", str(err))
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
index 2e299d4..0450ff7 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
@@ -24,14 +24,18 @@
 import logging
 import json
 import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import traceback
+from fabric.api import *
 
 
 def start_data_engine(cluster_name):
     print("Start Data Engine")
     try:
-        start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+        dlab.actions_lib.start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
@@ -47,7 +51,7 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     data_engine = dict()
     
@@ -59,15 +63,14 @@
         data_engine['computational_name'] = os.environ['computational_name']
     except:
         data_engine['computational_name'] = ''
-    data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['project_name'] = os.environ['project_name']
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + \
-        data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + \
-        data_engine['computational_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
 
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
 
     logging.info('[START DATA ENGINE CLUSTER]')
     print('[START DATA ENGINE CLUSTER]')
@@ -76,19 +79,18 @@
                                          data_engine['cluster_name']))
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to start Data Engine.", str(err))
+        dlab.fab.append_result("Failed to start Data Engine.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['notebook_ip'] = get_instance_ip_address(data_engine['tag_name'],
-                                                                    os.environ['notebook_instance_name']).get('Private')
-        data_engine['computational_ip'] = get_instance_ip_address(data_engine['tag_name'],
-                                                                         data_engine['computational_id']).get(
-            'Private')
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+            data_engine['tag_name'], os.environ['notebook_instance_name']).get('Private')
+        data_engine['computational_ip'] = dlab.meta_lib.get_instance_ip_address(
+            data_engine['tag_name'], data_engine['computational_id']).get('Private')
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -97,7 +99,7 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -108,6 +110,6 @@
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
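
Both hunks above switch the cluster name to a single format() call that now includes
the endpoint, and lower-case the tag suffix. Here is a minimal sketch of the resulting
naming convention; the concrete values are made-up examples, only the patterns come
from the diff.

    service_base_name = 'dlab-sbn'        # example value
    project_name = 'project1'             # example value
    endpoint_name = 'endpoint1'           # example value
    computational_name = 'spark1'         # example value

    cluster_name = "{}-{}-{}-de-{}".format(service_base_name, project_name,
                                           endpoint_name, computational_name)
    tag_name = service_base_name + '-tag'  # '-tag' replaces the old '-Tag' suffix

    print(cluster_name)  # dlab-sbn-project1-endpoint1-de-spark1
    print(tag_name)      # dlab-sbn-tag
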
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
index 3cb0d3b..d31d395 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
@@ -24,14 +24,15 @@
 import logging
 import json
 import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
 import sys
 
 
 def stop_data_engine(cluster_name):
     print("Stop Data Engine")
     try:
-        stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+        dlab.actions_lib.stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
@@ -47,7 +48,7 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     data_engine_config = dict()
     try:
@@ -58,14 +59,13 @@
         data_engine_config['computational_name'] = os.environ['computational_name']
     except:
         data_engine_config['computational_name'] = ''
-    data_engine_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    data_engine_config['service_base_name'] = os.environ['conf_service_base_name']
     data_engine_config['project_name'] = os.environ['project_name']
-    data_engine_config['cluster_name'] = \
-        data_engine_config['service_base_name'] + '-' \
-        + data_engine_config['project_name'] + '-de-' + \
-        data_engine_config['exploratory_name'] + '-' \
-        + data_engine_config['computational_name']
+    data_engine_config['endpoint_name'] = os.environ['endpoint_name']
+    data_engine_config['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine_config['service_base_name'],
+                                                                 data_engine_config['project_name'],
+                                                                 data_engine_config['endpoint_name'],
+                                                                 data_engine_config['computational_name'])
 
     logging.info('[STOP DATA ENGINE CLUSTER]')
     print('[STOP DATA ENGINE CLUSTER]')
@@ -74,7 +74,7 @@
                                         data_engine_config['cluster_name']))
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop Data Engine.", str(err))
+        dlab.fab.append_result("Failed to stop Data Engine.", str(err))
         sys.exit(1)
 
     try:
@@ -83,6 +83,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
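
stop_data_engine() above delegates to dlab.actions_lib.stop_ec2, which stops every
instance carrying the cluster tag. Below is a hypothetical boto3 sketch of such a
tag-driven helper, assuming the same (tag name, tag value) contract; this is not the
actual dlab implementation.

    import boto3

    def stop_instances_by_tag(tag_name, tag_value, region=None):
        # Plausible equivalent of a tag-based stop helper (assumption, not dlab code).
        ec2 = boto3.client('ec2', region_name=region)
        reservations = ec2.describe_instances(
            Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]},
                     {'Name': 'instance-state-name', 'Values': ['running']}]
        )['Reservations']
        instance_ids = [i['InstanceId'] for r in reservations for i in r['Instances']]
        if instance_ids:
            ec2.stop_instances(InstanceIds=instance_ids)
        return instance_ids
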
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
index a036f74..7d8c10d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
@@ -24,8 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import traceback
 import os
 
 
@@ -34,14 +35,14 @@
                           cluster_name, remote_kernel_name):
     print("Terminating data engine cluster")
     try:
-        remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+        dlab.actions_lib.remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
     except:
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        remove_dataengine_kernels(tag_name, notebook_name,
-                                  os_user, key_path, remote_kernel_name)
+        dlab.actions_lib.remove_dataengine_kernels(tag_name, notebook_name,
+                                                   os_user, key_path, remote_kernel_name)
     except:
         sys.exit(1)
 
@@ -57,7 +58,7 @@
                         filename=local_log_filepath)
     # generating variables dictionary
     print('Generating infrastructure names and tags')
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     data_engine = dict()
     
     try:
@@ -68,18 +69,16 @@
         data_engine['computational_name'] = os.environ['computational_name']
     except:
         data_engine['computational_name'] = ''
-    data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+    data_engine['service_base_name'] = os.environ['conf_service_base_name']
+    data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
     data_engine['project_name'] = os.environ['project_name']
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + \
-        data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' +\
-        data_engine['computational_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
-    data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + \
-                              os.environ['conf_key_name'] + '.pem'
+    data_engine['key_path'] = "{}/{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
 
     try:
         logging.info('[TERMINATE DATA ENGINE]')
@@ -93,7 +92,7 @@
                     data_engine['cluster_name']), data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -104,6 +103,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
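
All three lifecycle scripts above end by writing /root/result.json, and this change
makes a failed write fatal (append_result plus exit 1) instead of exiting 0. A compact
sketch of that shared pattern, with print() standing in for dlab.fab.append_result;
the example keys mirror the terminate hunk.

    import json
    import sys

    res = {"service_base_name": "dlab-sbn",      # example value
           "Action": "Terminate Data Engine"}
    try:
        with open("/root/result.json", 'w') as result:
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        # Previously this branch printed a message and exited 0; now it reports
        # the error and exits non-zero so the caller sees the failure.
        print("Error writing results.", str(err))
        sys.exit(1)
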
diff --git a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
index 7d504d8..a2ca856 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,69 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -123,16 +132,16 @@
         print('[CONFIGURE PROXY ON DEEP LEARNING INSTANCE]')
         additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -145,12 +154,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -166,9 +174,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -178,7 +185,7 @@
                  "--os_user {2} --jupyter_version {3} " \
                  "--scala_version {4} --spark_version {5} " \
                  "--hadoop_version {6} --region {7} " \
-                 "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+                 "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
                  .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
                          os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
                          os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -190,9 +197,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -203,12 +209,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -224,9 +229,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -251,99 +255,107 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        notebook_config['project_name'], notebook_config['endpoint_name'],
+                        os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                        notebook_config['project_name'], notebook_config['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], notebook_config['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
-                        os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                        notebook_config['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensor_board_url = 'http://' + ip_address + ':6006'
+        jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensor_board_url = 'http://' + ip_address + ':6006'
-    jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'],notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensor_board_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url},
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensor_board_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url},
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
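
Several hunks above rebuild os.environ['conf_additional_tags'] with format() so the
previously configured tags stay in front of the newly appended project/endpoint tags.
A minimal illustration follows; the starting value is a made-up example, the pattern
comes from the hunks.

    import os

    project_name, endpoint_name = 'project1', 'endpoint1'   # example values
    try:
        # Prepend whatever tags were already configured, then append the new ones
        os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
            project_name, endpoint_name, os.environ['conf_additional_tags'])
    except KeyError:
        # No pre-existing tags: start the string from scratch
        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
            project_name, endpoint_name)

    print(os.environ['conf_additional_tags'])
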
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
index 4d414b1..d96ef49 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
@@ -22,10 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
+
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,81 +42,103 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    print('Generating infrastructure names and tags')
-    edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    edge_conf['key_name'] = os.environ['conf_key_name']
-    edge_conf['user_key'] = os.environ['key']
-    edge_conf['project_name'] = os.environ['project_name']
-    edge_conf['endpoint_name'] = os.environ['endpoint_name']
-    edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
-                                                        edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
-    edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'],
-                                                           edge_conf['endpoint_name']).lower().replace('_', '-')
-    edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
-                                                                     edge_conf['endpoint_name']).lower().replace('_', '-')
-    edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
-    edge_conf['notebook_instance_name'] = '{}-{}-nb'.format(edge_conf['service_base_name'],
-                                                            os.environ['project_name'])
-    edge_conf['notebook_role_profile_name'] = '{}-{}-{}-nb-Profile' \
-        .format(edge_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
-                                                                     os.environ['project_name'], os.environ['endpoint_name'])
-    edge_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
-        .format(edge_conf['service_base_name'], os.environ['project_name'])
-    tag = {"Key": edge_conf['tag_name'],
-           "Value": "{}-{}-subnet".format(edge_conf['service_base_name'], os.environ['project_name'])}
-    edge_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
-    edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    edge_conf['network_type'] = os.environ['conf_network_type']
-    if edge_conf['network_type'] == 'public':
-        edge_conf['edge_public_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Public')
-        edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Private')
-    elif edge_conf['network_type'] == 'private':
-        edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
-            'Private')
-        edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
-    edge_conf['vpc1_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
+    def clear_resources():
+        dlab.actions_lib.remove_all_iam_resources('notebook', edge_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', edge_conf['project_name'])
+        dlab.actions_lib.remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(edge_conf['instance_name'])
+        dlab.actions_lib.remove_s3('edge', edge_conf['project_name'])
+
     try:
-        edge_conf['vpc2_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
-        edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
-    except KeyError:
-        edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
-
-    edge_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
-
-    instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
-
-    if os.environ['conf_stepcerts_enabled'] == 'true':
-        step_cert_sans = ' --san {0} '.format(edge_conf['edge_private_ip'])
+        print('Generating infrastructure names and tags')
+        edge_conf = dict()
+        edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        edge_conf['key_name'] = os.environ['conf_key_name']
+        edge_conf['user_key'] = os.environ['key']
+        edge_conf['project_name'] = os.environ['project_name']
+        edge_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                            edge_conf['endpoint_name'])
+        edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
+        edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name']).replace('_', '-').lower()
+        edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['endpoint_name']
+                                                                         ).replace('_', '-').lower()
+        edge_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(edge_conf['service_base_name'],
+                                                                          edge_conf['project_name'],
+                                                                          edge_conf['endpoint_name'])
+        edge_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(edge_conf['service_base_name'],
+                                                                   edge_conf['project_name'],
+                                                                   edge_conf['endpoint_name'])
+        edge_conf['notebook_role_profile_name'] = '{}-{}-{}-nb-profile'.format(edge_conf['service_base_name'],
+                                                                               edge_conf['project_name'],
+                                                                               edge_conf['endpoint_name'])
+        edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['project_name'],
+                                                                            edge_conf['endpoint_name'])
+        edge_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(edge_conf['service_base_name'],
+                                                                      edge_conf['project_name'],
+                                                                      edge_conf['endpoint_name'])
+        tag = {"Key": edge_conf['tag_name'],
+               "Value": "{}-{}-{}-subnet".format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                 edge_conf['endpoint_name'])}
+        edge_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+        edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        edge_conf['network_type'] = os.environ['conf_network_type']
         if edge_conf['network_type'] == 'public':
-            step_cert_sans += ' --san {0} --san {1}'.format(
-                get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name']),
-                edge_conf['edge_public_ip'])
-    else:
-        step_cert_sans = ''
+            edge_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Public')
+            edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+        elif edge_conf['network_type'] == 'private':
+            edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+                edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+            edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+        edge_conf['vpc1_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
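+        # A dedicated notebook VPC is optional; when configured, merge its CIDRs with the edge VPC's (de-duplicated)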
+        try:
+            edge_conf['vpc2_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
+            edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
+        except KeyError:
+            edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
+
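+        # Normalize the comma-separated list of allowed CIDRs, stripping stray spaces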
+        edge_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+
+        edge_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'],
+                                                                             edge_conf['instance_name'])
+        edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+
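+        # Certificate SANs for step-ca: always the private IP; public deployments also add the hostname and public IP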
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            edge_conf['step_cert_sans'] = ' --san {0} '.format(edge_conf['edge_private_ip'])
+            if edge_conf['network_type'] == 'public':
+                edge_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+                    dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name']),
+                    edge_conf['edge_public_ip'])
+        else:
+            edge_conf['step_cert_sans'] = ''
+        if os.environ['conf_os_family'] == 'debian':
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        sys.exit(1)
 
     try:
-        if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
-        if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
-
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -117,37 +146,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
         params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['aws_region'])
+            format(edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                   os.environ['aws_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -155,66 +171,56 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
                              "ldap_password": os.environ['ldap_service_password'],
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
     try:
         print('[INSTALLING USERs KEY]')
         logging.info('[INSTALLING USERs KEY]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": edge_conf['project_name'],
                              "user_keydir": os.environ['conf_key_dir'],
                              "user_key": edge_conf['user_key']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key." + str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing users key." + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        keycloak_client_secret = str(uuid.uuid4())
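+        # A fresh client secret is generated for the project's Keycloak client on every run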
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
         params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
-                 "--step_cert_sans '{}' " \
-            .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], edge_conf['service_base_name'] +
-                    '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret, step_cert_sans)
+                 "--step_cert_sans '{}' ".format(
+                  edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                  '{}-{}-{}'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                    edge_conf['endpoint_name']),
+                  edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
         except:
@@ -222,36 +228,31 @@
             raise Exception
         keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
                           "--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
-                          "--edge_public_ip {} --hostname {} --project_name {} --endpoint_name {} " \
-            .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
-                    os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
-                    os.environ['keycloak_user_password'],
-                    keycloak_client_secret, edge_conf['edge_public_ip'], instance_hostname, os.environ['project_name'], os.environ['endpoint_name'])
+                          "--edge_public_ip {} --hostname {} --project_name {} --endpoint_name {} ".format(
+                           edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+                           os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+                           os.environ['keycloak_user_password'], edge_conf['keycloak_client_secret'],
+                           edge_conf['edge_public_ip'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                           edge_conf['endpoint_name'])
         try:
             local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing nginx reverse proxy." + str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
-        remove_sgroups(edge_conf['dataengine_instances_name'])
-        remove_sgroups(edge_conf['notebook_instance_name'])
-        remove_sgroups(edge_conf['instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing nginx reverse proxy." + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
-        print("Hostname: {}".format(instance_hostname))
+        print("Hostname: {}".format(edge_conf['instance_hostname']))
         print("Public IP: {}".format(edge_conf['edge_public_ip']))
         print("Private IP: {}".format(edge_conf['edge_private_ip']))
-        print("Instance ID: {}".format(get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name'])))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'],
+                                                                          edge_conf['instance_name'])))
         print("Key name: {}".format(edge_conf['key_name']))
         print("Bucket name: {}".format(edge_conf['bucket_name']))
         print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
@@ -260,10 +261,10 @@
         print("Edge SG: {}".format(edge_conf['edge_security_group_name']))
         print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
         with open("/root/result.json", 'w') as result:
-            res = {"hostname": instance_hostname,
+            res = {"hostname": edge_conf['instance_hostname'],
                    "public_ip": edge_conf['edge_public_ip'],
                    "ip": edge_conf['edge_private_ip'],
-                   "instance_id": get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
+                   "instance_id": dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
                    "key_name": edge_conf['key_name'],
                    "user_own_bicket_name": edge_conf['bucket_name'],
                    "shared_bucket_name": edge_conf['shared_bucket_name'],
@@ -274,13 +275,13 @@
                    "edge_sg": edge_conf['edge_security_group_name'],
                    "notebook_subnet": edge_conf['private_subnet_cidr'],
                    "full_edge_conf": edge_conf,
-                   "project_name": os.environ['project_name'],
+                   "project_name": edge_conf['project_name'],
                    "@class": "com.epam.dlab.dto.aws.edge.EdgeInfoAws",
                    "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
 
-    sys.exit(0)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
index 2449cd7..a9f856a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
@@ -21,9 +21,14 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import logging
+import os
+import json
 
 
 if __name__ == "__main__":
@@ -35,29 +40,28 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['project_name'] = os.environ['project_name']
     edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
-        addresses = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
+        instance_hostname = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
+        addresses = dlab.meta_lib.get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
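+        # get_instance_ip_address returns a dict with 'Private' and 'Public' entries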
         ip_address = addresses.get('Private')
         public_ip_address = addresses.get('Public')
         print('[SUMMARY]')
@@ -74,7 +78,6 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
index 86ff6e3..d8bd92e 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
@@ -23,20 +23,26 @@
 
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Getting statuses of DLAB resources')
 
     try:
@@ -49,6 +55,5 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to collect necessary information.", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to collect necessary information.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
index 3f99b36..3948781 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
+import os
+import logging
+import json
 
 
 if __name__ == "__main__":
@@ -35,23 +39,22 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
+    edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['project_name'] = os.environ['project_name']
     edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+    edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+        dlab.actions_lib.stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
     except Exception as err:
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -60,7 +63,6 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
index 3ff45c8..cc53b22 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,67 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except KeyError:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
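+        # Expected AMI name is project-scoped unless shared images are enabled, in which case it is endpoint-scoped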
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = edge_instance_hostname
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -110,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -120,36 +131,34 @@
         logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
         print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
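+        # Outbound traffic from the notebook is routed through the edge node's Squid proxy on port 3128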
         additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
-        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
-                    notebook_config['dlab_ssh_user'])
+        params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}".format(
+            instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
     try:
         logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
         print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
-        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
-            format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
-                   edge_instance_private_ip)
+        params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".format(
+            instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
+            edge_instance_private_ip)
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -164,7 +173,7 @@
                  "--os_user {5} " \
                  "--scala_version {6} " \
                  "--r_mirror {7} " \
-                 "--ip_adress {8} " \
+                 "--ip_address {8} " \
                  "--exploratory_name {9} " \
                  "--edge_ip {10}".\
             format(instance_hostname,
@@ -184,9 +193,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -199,12 +207,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -216,12 +223,11 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -237,9 +243,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -264,92 +269,103 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
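+            # An empty ami_id means no image with the expected name exists yet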
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
-                    os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                    os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("Image name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Jupyter URL: {}".format(jupyter_dns_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_access_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("Image name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Jupyter URL: {}".format(jupyter_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_access_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
index b864fd4..d828df5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -42,69 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['edge_user_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                            notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = edge_instance_hostname
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = edge_instance_private_ip
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
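+        # Default login user depends on the AMI family: 'ubuntu' on Debian-based, 'ec2-user' on RedHat-based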
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
         params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+            (instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+             notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -123,16 +132,16 @@
         print('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
         additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -147,9 +156,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
    # installing and configuring jupyter and all dependencies
@@ -165,7 +173,7 @@
                  "--os_user {} " \
                  "--scala_version {} " \
                  "--r_mirror {} " \
-                 "--ip_adress {} " \
+                 "--ip_address {} " \
                  "--exploratory_name {}".\
             format(instance_hostname,
                    keyfile_name,
@@ -184,9 +192,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyterlab.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyterlab.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -199,12 +206,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -216,12 +222,11 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -237,9 +242,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -264,29 +268,27 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
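+    # If shared images are enabled, capture an AMI from this notebook the first time it is configured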
     if notebook_config['shared_image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -304,9 +306,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy for docker.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -321,60 +322,69 @@
         try:
            local("~/scripts/jupyterlab_container_start.py {}".format(params))
         except:
-             traceback.print_exc()
-             raise Exception
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start Jupyter container.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("Image name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("JupyterLab URL: {}".format(jupyter_ip_url))
-    print("JupyterLab URL: {}".format(jupyter_dns_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "http://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                             notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "http://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("Image name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("JupyterLab URL: {}".format(jupyter_ip_url))
+        print("JupyterLab URL: {}".format(jupyter_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "JupyterLab",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url},
-                   #{"description": "JupyterLab (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "JupyterLab",
+                        "url": jupyter_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_acces_url},
+                       #{"description": "JupyterLab (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
index 009b81e..9d44ba5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
@@ -22,12 +22,16 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
 import traceback
 import boto3
+import logging
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,121 +42,137 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    create_aws_config_files()
-    print('Generating infrastructure names and tags')
-    project_conf = dict()
-    project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    project_conf['endpoint_name'] = os.environ['endpoint_name']
-    project_conf['endpoint_tag'] = os.environ['endpoint_name']
-    project_conf['project_name'] = os.environ['project_name']
-    project_conf['project_tag'] = os.environ['project_name']
-    project_conf['key_name'] = os.environ['conf_key_name']
-    project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
-    project_conf['vpc_id'] = os.environ['aws_vpc_id']
-    project_conf['region'] = os.environ['aws_region']
-    project_conf['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
-    project_conf['instance_size'] = os.environ['aws_edge_instance_size']
-    project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
-    project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                os.environ['project_name'], os.environ['endpoint_name'])
-    project_conf['tag_name'] = '{}-Tag'.format(project_conf['service_base_name'])
-    project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
-                                                                  project_conf['project_name'],
-                                                                  project_conf['endpoint_name'])
-    project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
-    project_conf['shared_bucket_name_tag'] = '{0}-{1}-shared-bucket'.format(
-        project_conf['service_base_name'], project_conf['endpoint_tag'])
-    project_conf['shared_bucket_name'] = project_conf['shared_bucket_name_tag'].lower().replace('_', '-')
-    project_conf['edge_role_name'] = '{}-{}-edge-Role'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_role_profile_name'] = '{}-{}-edge-Profile'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_policy_name'] = '{}-{}-edge-Policy'.format(
-        project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
-    project_conf['edge_security_group_name'] = '{}-sg'.format(project_conf['edge_instance_name'])
-    project_conf['notebook_instance_name'] = '{}-{}-nb'.format(project_conf['service_base_name'],
-                                                            os.environ['project_name'])
-    project_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['notebook_dataengine_role_name'] = '{}-{}-{}-nb-de-Role' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
-    project_conf['notebook_dataengine_policy_name'] = '{}-{}-{}-nb-de-Policy' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
-    project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
-    project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
-                                                                     os.environ['project_name'],os.environ['endpoint_name'])
-    project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
-    project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
-                                                               os.environ['project_name'])
-    project_conf['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
-        .format(project_conf['service_base_name'], os.environ['project_name'])
-    project_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ','')})
-    project_conf['network_type'] = os.environ['conf_network_type']
-    project_conf['all_ip_cidr'] = '0.0.0.0/0'
-    project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
-    project_conf['elastic_ip_name'] = '{0}-{1}-edge-EIP'.format(project_conf['service_base_name'],
-                                                             os.environ['project_name'])
-    project_conf['provision_instance_ip'] = None
-    project_conf['local_endpoint'] = False
     try:
-        project_conf['provision_instance_ip'] = get_instance_ip_address(
-            project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
-                                                                os.environ['endpoint_name'])).get('Private') + "/32"
-    except:
-        project_conf['provision_instance_ip'] = get_instance_ip_address(project_conf['tag_name'], '{0}-ssn'.format(
-            project_conf['service_base_name'])).get('Private') + "/32"
-        project_conf['local_endpoint'] = True
-    if 'aws_user_predefined_s3_policies' not in os.environ:
-        os.environ['aws_user_predefined_s3_policies'] = 'None'
-
-    try:
-        if os.environ['conf_user_subnets_range'] == '':
-            raise KeyError
-    except KeyError:
-        os.environ['conf_user_subnets_range'] = ''
-
-    # FUSE in case of absence of user's key
-    try:
-        project_conf['user_key'] = os.environ['key']
+        dlab.actions_lib.create_aws_config_files()
+        print('Generating infrastructure names and tags')
+        project_conf = dict()
+        project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        project_conf['endpoint_name'] = os.environ['endpoint_name']
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
+        project_conf['project_name'] = os.environ['project_name']
+        project_conf['project_tag'] = project_conf['project_name']
+        project_conf['key_name'] = os.environ['conf_key_name']
+        project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
+        project_conf['vpc_id'] = os.environ['aws_vpc_id']
+        project_conf['region'] = os.environ['aws_region']
+        project_conf['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+            os.environ['conf_os_family'])])
+        project_conf['instance_size'] = os.environ['aws_edge_instance_size']
+        project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
+        project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
+                                                                    project_conf['project_name'],
+                                                                    project_conf['endpoint_name'])
+        project_conf['tag_name'] = '{}-tag'.format(project_conf['service_base_name'])
+        project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                      project_conf['project_name'],
+                                                                      project_conf['endpoint_name'])
+        project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
+        project_conf['shared_bucket_name_tag'] = '{0}-{1}-shared-bucket'.format(
+            project_conf['service_base_name'], project_conf['endpoint_tag'])
+        project_conf['shared_bucket_name'] = project_conf['shared_bucket_name_tag'].lower().replace('_', '-')
+        project_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+                                                                     project_conf['project_name'],
+                                                                     project_conf['endpoint_name'])
+        project_conf['edge_role_profile_name'] = '{}-{}-{}-edge-profile'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['edge_policy_name'] = '{}-{}-{}-edge-policy'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(project_conf['service_base_name'],
+                                                                             project_conf['project_name'],
+                                                                             project_conf['endpoint_name'])
+        project_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'],
+                                                                      project_conf['project_name'],
+                                                                      project_conf['endpoint_name'])
+        project_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(project_conf['service_base_name'],
+                                                                         project_conf['project_name'],
+                                                                         project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_role_name'] = '{}-{}-{}-nb-de-role'.format(project_conf['service_base_name'],
+                                                                                     project_conf['project_name'],
+                                                                                     project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_policy_name'] = '{}-{}-{}-nb-de-policy'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+                                                                               project_conf['project_name'],
+                                                                               project_conf['endpoint_name'])
+        project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+        project_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ', '')})
+        project_conf['network_type'] = os.environ['conf_network_type']
+        project_conf['all_ip_cidr'] = '0.0.0.0/0'
+        project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
+        project_conf['elastic_ip_name'] = '{0}-{1}-{2}-edge-static-ip'.format(project_conf['service_base_name'],
+                                                                              project_conf['project_name'],
+                                                                              project_conf['endpoint_name'])
+        project_conf['provision_instance_ip'] = None
+        project_conf['local_endpoint'] = False
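+        # Resolve the provisioning IP from the endpoint instance; fall back to the SSN node (local endpoint)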
         try:
-            local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
-                                                    project_conf['project_name']))
+            project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+                project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
+                                                                    project_conf['endpoint_name'])).get('Private'))
         except:
-            print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
-    except KeyError:
-        print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+                project_conf['tag_name'], '{0}-ssn'.format(project_conf['service_base_name'])).get('Private'))
+            project_conf['local_endpoint'] = True
+        if 'aws_user_predefined_s3_policies' not in os.environ:
+            os.environ['aws_user_predefined_s3_policies'] = 'None'
+
+        try:
+            if os.environ['conf_user_subnets_range'] == '':
+                raise KeyError
+        except KeyError:
+            os.environ['conf_user_subnets_range'] = ''
+
+        # FUSE in case of absence of user's key
+        try:
+            project_conf['user_key'] = os.environ['key']
+            try:
+                local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+                                                        project_conf['project_name']))
+            except:
+                print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
+        except KeyError:
+            print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            sys.exit(1)
+
+        print("Will create exploratory environment with edge node as access point as following: {}".
+              format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        logging.info(json.dumps(project_conf))
+
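+        # Propagate project/endpoint tags via conf_additional_tags so created resources are tagged consistently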
+        if 'conf_additional_tags' in os.environ:
+            project_conf['bucket_additional_tags'] = ';' + os.environ['conf_additional_tags']
+            os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+                                                 ';project_tag:{0};endpoint_tag:{1};'.format(
+                                                     project_conf['project_tag'], project_conf['endpoint_tag'])
+        else:
+            project_conf['bucket_additional_tags'] = ''
+            os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
+                                                                                           project_conf['endpoint_tag'])
+        print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: {}".
-          format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
-    logging.info(json.dumps(project_conf))
-
-    if 'conf_additional_tags' in os.environ:
-        project_conf['bucket_additional_tags'] = ';' + os.environ['conf_additional_tags']
-        os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
-                                             ';project_tag:{0};endpoint_tag:{1};'.format(
-                                                 project_conf['project_tag'], project_conf['endpoint_tag'])
-    else:
-        project_conf['bucket_additional_tags'] = ''
-        os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
-                                                                                       project_conf['endpoint_tag'])
-    print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
     if not project_conf['local_endpoint']:
         # attach project_tag and endpoint_tag to endpoint
         try:
-            endpoint_id = get_instance_by_name(project_conf['tag_name'], '{0}-{1}-endpoint'.format(
-                project_conf['service_base_name'], os.environ['endpoint_name']))
+            endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'], '{0}-{1}-endpoint'.format(
+                project_conf['service_base_name'], project_conf['endpoint_name']))
             print("Endpoint id: " + endpoint_id)
             ec2 = boto3.client('ec2')
-            ec2.create_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag', 'Value': project_conf['project_tag']},
-                                                           {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
+            ec2.create_tags(Resources=[endpoint_id], Tags=[
+                {'Key': 'project_tag', 'Value': project_conf['project_tag']},
+                {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
         except Exception as err:
             print("Failed to attach Project tag to Endpoint", str(err))
             traceback.print_exc()
@@ -160,41 +180,43 @@
 
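+    # Use the secondary VPC (and its tag) when one is configured; otherwise fall back to the primary VPC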
     try:
         project_conf['vpc2_id'] = os.environ['aws_vpc2_id']
-        project_conf['tag_name'] = '{}-secondary-Tag'.format(project_conf['service_base_name'])
+        project_conf['tag_name'] = '{}-secondary-tag'.format(project_conf['service_base_name'])
     except KeyError:
         project_conf['vpc2_id'] = project_conf['vpc_id']
 
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
         params = "--vpc_id '{}' --infra_tag_name {} --infra_tag_value {} --prefix {} " \
                  "--user_subnets_range '{}' --subnet_name {} --zone {}".format(
-            project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
-            project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
-            project_conf['private_subnet_name'],
-            project_conf['zone'])
+                  project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
+                  project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
+                  project_conf['private_subnet_name'],
+                  project_conf['zone'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
     tag = {"Key": project_conf['tag_name'],
-           "Value": "{0}-{1}-subnet".format(project_conf['service_base_name'], project_conf['project_name'])}
-    project_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
-    subnet_id = get_subnet_by_cidr(project_conf['private_subnet_cidr'], project_conf['vpc2_id'])
-    print('subnet id: {}'.format(subnet_id))
-
+           "Value": "{0}-{1}-{2}-subnet".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                project_conf['endpoint_name'])}
+    project_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+    subnet_id = dlab.meta_lib.get_subnet_by_cidr(project_conf['private_subnet_cidr'], project_conf['vpc2_id'])
+    print('Subnet id: {}'.format(subnet_id))
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
 
     try:
         logging.info('[CREATE EDGE ROLES]')
         print('[CREATE EDGE ROLES]')
-        user_tag = "{0}:{0}-{1}-edge-Role".format(project_conf['service_base_name'], project_conf['project_name'])
+        user_tag = "{0}:{0}-{1}-{2}-edge-role".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                      project_conf['endpoint_name'])
         params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
                  "--infra_tag_value {} --user_tag_value {}" \
                  .format(project_conf['edge_role_name'], project_conf['edge_role_profile_name'],
@@ -206,14 +228,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to creating roles.", str(err))
+        dlab.fab.append_result("Failed to creating roles.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE BACKEND (NOTEBOOK) ROLES]')
         print('[CREATE BACKEND (NOTEBOOK) ROLES]')
-        user_tag = "{0}:{0}-{1}-{2}-nb-de-Role".format(project_conf['service_base_name'], project_conf['project_name'],os.environ['endpoint_name'])
+        user_tag = "{0}:{0}-{1}-{2}-nb-de-role".format(project_conf['service_base_name'], project_conf['project_name'],
+                                                       project_conf['endpoint_name'])
         params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
                  "--infra_tag_value {} --user_tag_value {}" \
                  .format(project_conf['notebook_dataengine_role_name'],
@@ -226,15 +248,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to creating roles.", str(err))
-        remove_all_iam_resources('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to creating roles.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE SECURITY GROUP FOR EDGE NODE]')
         print('[CREATE SECURITY GROUPS FOR EDGE]')
-        edge_sg_ingress = format_sg([
+        edge_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -271,7 +292,7 @@
                 "PrefixListIds": []
             }
         ])
-        edge_sg_egress = format_sg([
+        edge_sg_egress = dlab.meta_lib.format_sg([
             {
                 "PrefixListIds": [],
                 "FromPort": 22,
@@ -390,26 +411,26 @@
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed creating security group for edge node.", str(err))
+            dlab.fab.append_result("Failed creating security group for edge node.", str(err))
             raise Exception
 
         with hide('stderr', 'running', 'warnings'):
             print('Waiting for changes to propagate')
             time.sleep(10)
     except:
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
         print('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
-        project_group_id = check_security_group(project_conf['edge_security_group_name'])
+        project_group_id = dlab.meta_lib.check_security_group(project_conf['edge_security_group_name'])
         sg_list = project_conf['sg_ids'].replace(" ", "").split(',')
         rules_list = []
         for i in sg_list:
             rules_list.append({"GroupId": i})
-        private_sg_ingress = format_sg([
+        private_sg_ingress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [],
@@ -430,7 +451,7 @@
             }
         ])
 
-        private_sg_egress = format_sg([
+        private_sg_egress = dlab.meta_lib.format_sg([
             {
                 "IpProtocol": "-1",
                 "IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -475,12 +496,11 @@
             print('Waiting for changes to propagate')
             time.sleep(10)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating security group for private subnet.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR MASTER NODE]')
@@ -498,12 +518,11 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create sg.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create sg.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -521,47 +540,48 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create security group.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create security group.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE BUCKETS]')
         print('[CREATE BUCKETS]')
-        project_conf['shared_bucket_tags'] = 'endpoint_tag:{0};{1}:{2};{3}:{4}{5}'.format(project_conf['endpoint_tag'],
-                                                                                  os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
-                                                                                  project_conf['tag_name'], project_conf['shared_bucket_name'], project_conf['bucket_additional_tags']).replace(';', ',')
+        project_conf['shared_bucket_tags'] = 'endpoint_tag:{0};{1}:{2};{3}:{4}{5}'.format(
+            project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+            project_conf['tag_name'], project_conf['shared_bucket_name'],
+            project_conf['bucket_additional_tags']).replace(';', ',')
         params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}". \
-            format(project_conf['shared_bucket_name'], project_conf['shared_bucket_tags'], project_conf['region'], project_conf['shared_bucket_name_tag'])
+            format(project_conf['shared_bucket_name'], project_conf['shared_bucket_tags'], project_conf['region'],
+                   project_conf['shared_bucket_name_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
         except:
             traceback.print_exc()
             raise Exception
-        project_conf['bucket_tags'] = 'endpoint_tag:{0};{1}:{2};project_tag:{3};{4}:{5}{6}'.format(project_conf['endpoint_tag'],
-                                                                                  os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
-                                                                                  project_conf['project_tag'],
-                                                                                  project_conf['tag_name'], project_conf['bucket_name'], project_conf['bucket_additional_tags']).replace(';', ',')
+        project_conf['bucket_tags'] = 'endpoint_tag:{0};{1}:{2};project_tag:{3};{4}:{5}{6}'.format(
+            project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+            project_conf['project_tag'], project_conf['tag_name'], project_conf['bucket_name'],
+            project_conf['bucket_additional_tags']).replace(';', ',')
         params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}" \
-                 .format(project_conf['bucket_name'], project_conf['bucket_tags'], project_conf['region'], project_conf['bucket_name_tag'])
+                 .format(project_conf['bucket_name'], project_conf['bucket_tags'], project_conf['region'],
+                         project_conf['bucket_name_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create buckets.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
+        dlab.fab.append_result("Failed to create buckets.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
         sys.exit(1)
 
     try:
@@ -569,26 +589,24 @@
         print('[CREATING BUCKET POLICY FOR USER INSTANCES]')
         params = '--bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} ' \
                  '--notebook_role_name {} --service_base_name {} --region {} ' \
-                 '--user_predefined_s3_policies "{}"'.format(project_conf['bucket_name'],
-                                                             project_conf['shared_bucket_name'],
-                                                             os.environ['project_name'], project_conf['edge_role_name'],
-                                                             project_conf['notebook_dataengine_role_name'],
-                                                             project_conf['service_base_name'], project_conf['region'],
-                                                             os.environ['aws_user_predefined_s3_policies'])
+                 '--user_predefined_s3_policies "{}" --endpoint_name {}'.format(
+                  project_conf['bucket_name'], project_conf['shared_bucket_name'], project_conf['project_name'],
+                  project_conf['edge_role_name'], project_conf['notebook_dataengine_role_name'],
+                  project_conf['service_base_name'], project_conf['region'],
+                  os.environ['aws_user_predefined_s3_policies'], project_conf['endpoint_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_policy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create bucket policy.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to create bucket policy.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+        dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
         sys.exit(1)
 
     try:
@@ -602,27 +620,27 @@
                     project_conf['edge_instance_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
-            edge_instance = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+            edge_instance = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                               project_conf['edge_instance_name'])
         except:
             traceback.print_exc()
             raise Exception
-
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        remove_all_iam_resources('notebook', os.environ['project_name'])
-        remove_all_iam_resources('edge', os.environ['project_name'])
-        remove_sgroups(project_conf['dataengine_instances_name'])
-        remove_sgroups(project_conf['notebook_instance_name'])
-        remove_sgroups(project_conf['edge_instance_name'])
-        remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+        dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+        dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+        dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
         sys.exit(1)
 
     if project_conf['network_type'] == 'public':
         try:
             logging.info('[ASSOCIATING ELASTIC IP]')
             print('[ASSOCIATING ELASTIC IP]')
-            project_conf['edge_id'] = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+            project_conf['edge_id'] = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                                         project_conf['edge_instance_name'])
             try:
                 project_conf['elastic_ip'] = os.environ['edge_elastic_ip']
             except:
@@ -636,19 +654,19 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to associate elastic ip.", str(err))
+            dlab.fab.append_result("Failed to associate elastic ip.", str(err))
             try:
-                project_conf['edge_public_ip'] = get_instance_ip_address(project_conf['tag_name'],
-                                                                      project_conf['edge_instance_name']).get('Public')
-                project_conf['allocation_id'] = get_allocation_id_by_elastic_ip(project_conf['edge_public_ip'])
+                project_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(
+                    project_conf['tag_name'], project_conf['edge_instance_name']).get('Public')
+                project_conf['allocation_id'] = dlab.meta_lib.get_allocation_id_by_elastic_ip(
+                    project_conf['edge_public_ip'])
             except:
                 print("No Elastic IPs to release!")
-            remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
-            remove_all_iam_resources('notebook', os.environ['project_name'])
-            remove_all_iam_resources('edge', os.environ['project_name'])
-            remove_sgroups(project_conf['dataengine_instances_name'])
-            remove_sgroups(project_conf['notebook_instance_name'])
-            remove_sgroups(project_conf['edge_instance_name'])
-            remove_s3('edge', os.environ['project_name'])
-            sys.exit(1)
\ No newline at end of file
+            dlab.actions_lib.remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
+            dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+            dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+            dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+            dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
+            sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
index c0ec4d5..7559864 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
@@ -22,18 +22,22 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
 import boto3
 import requests
 
 
-def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg):
+def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg, endpoint_name):
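+    # endpoint_name is forwarded to remove_all_iam_resources() so that the
+    # per-endpoint IAM roles and instance profiles are matched (assumed
+    # signature of the refactored dlab.actions_lib helpers).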
     print('Terminating EMR cluster')
     try:
-        clusters_list = get_emr_list(tag_name)
+        clusters_list = dlab.meta_lib.get_emr_list(tag_name)
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
@@ -41,89 +45,105 @@
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
                 if '{}'.format(tag_value[:-1]) in emr_name:
-                    terminate_emr(cluster_id)
+                    dlab.actions_lib.terminate_emr(cluster_id)
                     print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
     try:
-        remove_ec2(tag_name, tag_value)
-    except:
+        dlab.actions_lib.remove_ec2(tag_name, tag_value)
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate instances.", str(err))
         sys.exit(1)
 
     print("Removing s3 bucket")
     try:
-        remove_s3('edge', project_name)
-    except:
+        dlab.actions_lib.remove_s3('edge', project_name)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove buckets.", str(err))
         sys.exit(1)
 
     print("Removing IAM roles and profiles")
     try:
-        remove_all_iam_resources('notebook', project_name)
-        remove_all_iam_resources('edge', project_name)
-    except:
+        dlab.actions_lib.remove_all_iam_resources('notebook', project_name, endpoint_name)
+        dlab.actions_lib.remove_all_iam_resources('edge', project_name, endpoint_name)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove IAM roles and profiles.", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        remove_sgroups(emr_sg)
-        remove_sgroups(de_sg)
-        remove_sgroups(nb_sg)
-        remove_sgroups(edge_sg)
-    except:
+        dlab.actions_lib.remove_sgroups(emr_sg)
+        dlab.actions_lib.remove_sgroups(de_sg)
+        dlab.actions_lib.remove_sgroups(nb_sg)
+        dlab.actions_lib.remove_sgroups(edge_sg)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove Security Groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        remove_subnets(tag_value)
-    except:
+        dlab.actions_lib.remove_subnets(tag_value)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
-    create_aws_config_files()
+    dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     project_conf = dict()
-    project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    project_conf['endpoint_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'], os.environ['endpoint_name'])
+    project_conf['service_base_name'] = os.environ['conf_service_base_name']
     project_conf['project_name'] = os.environ['project_name']
-    project_conf['tag_name'] = project_conf['service_base_name'] + '-Tag'
-    project_conf['tag_value'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-*'
-    project_conf['edge_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-edge'
-    project_conf['nb_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-nb'
+    project_conf['endpoint_name'] = os.environ['endpoint_name']
+    project_conf['endpoint_instance_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'],
+                                                                     project_conf['endpoint_name'])
+    project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
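+    # Tag keys are lower-case ('-tag') and must match the casing used when
+    # resources were tagged at creation; the de/des security-group patterns
+    # below end with '*' so remove_sgroups() can match every per-cluster group.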
+    project_conf['tag_value'] = '{}-{}-{}-*'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                    project_conf['endpoint_name'])
+    project_conf['edge_sg'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                     project_conf['endpoint_name'])
+    project_conf['nb_sg'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                                 project_conf['endpoint_name'])
     project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                os.environ['project_name'], os.environ['endpoint_name'])
-    project_conf['de_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + \
-                                             '-dataengine*'
-    project_conf['emr_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-des-*'
+                                                                project_conf['project_name'],
+                                                                project_conf['endpoint_name'])
+    project_conf['de_sg'] = '{}-{}-{}-de*'.format(project_conf['service_base_name'],
+                                                  project_conf['project_name'],
+                                                  project_conf['endpoint_name'])
+    project_conf['emr_sg'] = '{}-{}-{}-des-*'.format(project_conf['service_base_name'],
+                                                     project_conf['project_name'],
+                                                     project_conf['endpoint_name'])
 
     try:
         logging.info('[TERMINATE PROJECT]')
         print('[TERMINATE PROJECT]')
         try:
             terminate_edge_node(project_conf['tag_name'], project_conf['project_name'], project_conf['tag_value'],
-                                project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'], project_conf['emr_sg'])
+                                project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'],
+                                project_conf['emr_sg'], project_conf['endpoint_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate project.", str(err))
+            dlab.fab.append_result("Failed to terminate project.", str(err))
     except Exception as err:
         print('Error: {0}'.format(err))
         sys.exit(1)
 
     try:
-        endpoint_id = get_instance_by_name(project_conf['tag_name'], project_conf['endpoint_name'])
+        endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+                                                         project_conf['endpoint_instance_name'])
         print("Endpoint id: " + endpoint_id)
         ec2 = boto3.client('ec2')
         ec2.delete_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag'}, {'Key': 'endpoint_tag'}])
@@ -148,7 +168,8 @@
         }
 
         client_params = {
-            "clientId": project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-' + os.environ['endpoint_name'],
+            "clientId": '{}-{}-{}'.format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
         }
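+        # NB: neither the token request nor the client delete below checks
+        # the HTTP status code; a 4xx/5xx from Keycloak would pass silently.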
 
         keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -163,8 +184,10 @@
                                                                                os.environ['keycloak_realm_name'],
                                                                                keycloak_id_client)
 
-        keycloak_client = requests.delete(keycloak_client_delete_url, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
-                                                 "Content-Type": "application/json"})
+        keycloak_client = requests.delete(
+            keycloak_client_delete_url,
+            headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                     "Content-Type": "application/json"})
     except Exception as err:
         print("Failed to remove project client from Keycloak", str(err))
 
@@ -175,6 +198,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
index 574ad47..dd2a93c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -43,70 +45,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['rstudio_pass'] = id_generator()
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
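+        # A shared image omits the project segment from its name, presumably
+        # so a single AMI can be reused by every project on the endpoint.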
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
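+        # The profile name below is used verbatim (no lower-casing or
+        # '-'-to-'_' munging), so it must match the spelling the profile was
+        # created with.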
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -114,9 +124,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -133,9 +142,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -151,9 +159,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring R_STUDIO and all dependencies
@@ -163,7 +170,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, keyfile_name,
                     os.environ['aws_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -175,9 +182,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure rstudio.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure rstudio.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -193,9 +199,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,12 +211,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -227,9 +231,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -240,103 +243,110 @@
             'tensor': False
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    rstudio_dns_url = "http://" + dns_name + ":8787/"
-    rstudio_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio URL: {}".format(rstudio_dns_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        rstudio_dns_url = "http://" + dns_name + ":8787/"
+        rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio URL: {}".format(rstudio_dns_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_access_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
index 7be6f6c..5ac16f5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
@@ -21,12 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os, json
+import logging
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import traceback
+import json
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -34,84 +38,108 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
+
+    ssn_conf = dict()
+    ssn_conf['instance'] = 'ssn'
+
+    def clear_resources():
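+        # Rollback helper: the pre_defined_* flags mark resources this run
+        # created itself, so user-supplied VPCs/subnets/SGs are left alone.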
+        if ssn_conf['domain_created']:
+            dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                    os.environ['ssn_hosted_zone_name'],
+                                                    os.environ['ssn_subdomain'])
+        dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        dlab.actions_lib.remove_s3(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
+            try:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+            except:
+                print("There are no VPC Endpoints")
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
 
     try:
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
-        role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
-        policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
-        default_endpoint_name = os.environ['default_endpoint_name']
-        shared_bucket_name_tag = '{0}-{1}-shared-bucket'.format(service_base_name, default_endpoint_name)
-        ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
-        shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
-        tag_name = service_base_name + '-Tag'
-        tag2_name = service_base_name + '-secondary-Tag'
-        instance_name = service_base_name + '-ssn'
-        region = os.environ['aws_region']
-        ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_ami_id = get_ami_id(ssn_image_name)
-        policy_path = '/root/files/ssn_policy.json'
-        vpc_cidr = os.environ['conf_vpc_cidr']
-        vpc2_cidr = os.environ['conf_vpc2_cidr']
-        sg_name = instance_name + '-sg'
-        pre_defined_vpc = False
-        pre_defined_subnet = False
-        pre_defined_sg = False
-        billing_enabled = True
-        dlab_ssh_user = os.environ['conf_os_user']
-        network_type = os.environ['conf_network_type']
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
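+        # The base name is capped at 20 characters; replace_multi_symbols
+        # is assumed to collapse repeated '-' runs into a single one.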
         if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and \
                 'ssn_subdomain' in os.environ:
-            domain_created = True
+            ssn_conf['domain_created'] = True
         else:
-            domain_created = False
+            ssn_conf['domain_created'] = False
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_sg'] = False
+        # default so clear_resources() cannot hit a KeyError when the duo-VPC
+        # branch below never sets this flag
+        ssn_conf['pre_defined_vpc2'] = False
+        ssn_conf['billing_enabled'] = True
+        ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+        ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['region'] = os.environ['aws_region']
+        ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['network_type'] = os.environ['conf_network_type']
+        ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
 
         try:
             if os.environ['aws_vpc_id'] == '':
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
-            pre_defined_vpc = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'], ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_vpc'] = True
         try:
             if os.environ['aws_subnet_id'] == '':
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_subnet_id'] = get_subnet_by_tag(tag, True)
-            pre_defined_subnet = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_subnet_id'] = dlab.meta_lib.get_subnet_by_tag(ssn_conf['tag'], True)
+            ssn_conf['pre_defined_subnet'] = True
         try:
             if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
                 raise KeyError
         except KeyError:
-            tag = {"Key": tag2_name, "Value": "{}-subnet".format(service_base_name)}
-            os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
-            pre_defined_vpc2 = True
+            ssn_conf['tag'] = {"Key": ssn_conf['tag2_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+            os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+                                                                     ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_vpc2'] = True
         try:
             if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_peering_id']:
                 raise KeyError
         except KeyError:
-            os.environ['aws_peering_id'] = get_peering_by_tag(tag_name, service_base_name)
-            pre_defined_peering = True
+            os.environ['aws_peering_id'] = dlab.meta_lib.get_peering_by_tag(ssn_conf['tag_name'],
+                                                                            ssn_conf['service_base_name'])
+            ssn_conf['pre_defined_peering'] = True
         try:
             if os.environ['aws_security_groups_ids'] == '':
                 raise KeyError
         except KeyError:
-            os.environ['aws_security_groups_ids'] = get_security_group_by_name(sg_name)
-            pre_defined_sg = True
+            os.environ['aws_security_groups_ids'] = dlab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
+            ssn_conf['pre_defined_sg'] = True
         try:
             if os.environ['aws_account_id'] == '':
                 raise KeyError
             if os.environ['aws_billing_bucket'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['aws_account_id'] = 'None'
             os.environ['aws_billing_bucket'] = 'None'
         try:
@@ -120,36 +148,41 @@
         except KeyError:
             os.environ['aws_report_path'] = ''
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
 
-        if network_type == 'private':
-            instance_hostname = get_instance_ip_address(tag_name, instance_name).get('Private')
+        if ssn_conf['network_type'] == 'private':
+            ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_ip_address(
+                ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private')
         else:
-            instance_hostname = get_instance_hostname(tag_name, instance_name)
+            ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(
+                ssn_conf['tag_name'], ssn_conf['instance_name'])
 
         if os.environ['conf_stepcerts_enabled'] == 'true':
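+            # SAN list for the step-ca certificate: the private IP always,
+            # plus hostname and public IP when the SSN is publicly reachable.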
-            step_cert_sans = ' --san {0} '.format(get_instance_ip_address(tag_name, instance_name).get('Private'))
-            if network_type == 'public':
-                step_cert_sans += ' --san {0} --san {1}'.format(
-                    get_instance_hostname(tag_name, instance_name),
-                    get_instance_ip_address(tag_name, instance_name).get('Public'))
+            ssn_conf['step_cert_sans'] = ' --san {0} '.format(dlab.meta_lib.get_instance_ip_address(
+                ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private'))
+            if ssn_conf['network_type'] == 'public':
+                ssn_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+                    dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'], ssn_conf['instance_name']),
+                    dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                          ssn_conf['instance_name']).get('Public'))
         else:
-            step_cert_sans = ''
+            ssn_conf['step_cert_sans'] = ''
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             dlab_ssh_user, sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -157,30 +190,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -188,8 +199,8 @@
         print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
         params = "--hostname {} --keyfile {} --pip_packages 'boto3 backoff argparse fabric==1.14.0 awscli pymongo " \
                  "pyyaml jinja2' --user {} --region {}". \
-            format(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", dlab_ssh_user,
-                   os.environ['aws_region'])
+            format(ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+                   ssn_conf['dlab_ssh_user'], os.environ['aws_region'])
 
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
@@ -197,44 +208,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed installing software: pip, packages.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SSN INSTANCE]')
         print('[CONFIGURE SSN INSTANCE]')
-        additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name": service_base_name,
+        additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name":
+                             ssn_conf['service_base_name'],
                              "security_group_id": os.environ['aws_security_groups_ids'],
                              "vpc_id": os.environ['aws_vpc_id'], "subnet_id": os.environ['aws_subnet_id'],
                              "admin_key": os.environ['conf_key_name']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
                  "--tag_resource_id {} --step_cert_sans '{}' ".format(
-                  instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
-                  json.dumps(additional_config), dlab_ssh_user, os.environ['ssn_dlab_path'],
-                  os.environ['conf_tag_resource_id'], step_cert_sans)
+                  ssn_conf['instance_hostname'],
+                  "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+                  json.dumps(additional_config), ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+                  os.environ['conf_tag_resource_id'], ssn_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -242,30 +233,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Failed configuring ssn.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Failed configuring ssn.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -284,11 +253,11 @@
                              {"name": "dataengine-service", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"}]
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
-                 "--cloud_provider {} --region {}".format(instance_hostname,
+                 "--cloud_provider {} --region {}".format(ssn_conf['instance_hostname'],
                                                           "{}{}.pem".format(os.environ['conf_key_dir'],
                                                                             os.environ['conf_key_name']),
                                                           json.dumps(additional_config), os.environ['conf_os_family'],
-                                                          dlab_ssh_user, os.environ['ssn_dlab_path'],
+                                                          ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
                                                           os.environ['conf_cloud_provider'], os.environ['aws_region'])
 
         try:
@@ -297,61 +266,16 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Unable to configure docker.", str(err))
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Unable to configure docker.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        # mongo_parameters = {
-        #     "aws_region": os.environ['aws_region'],
-        #     "aws_vpc_id": os.environ['aws_vpc_id'],
-        #     "aws_subnet_id": os.environ['aws_subnet_id'],
-        #     "conf_service_base_name": service_base_name,
-        #     "aws_security_groups_ids": os.environ['aws_security_groups_ids'].replace(" ", ""),
-        #     "conf_os_family": os.environ['conf_os_family'],
-        #     "conf_tag_resource_id": os.environ['conf_tag_resource_id'],
-        #     "conf_key_dir": os.environ['conf_key_dir'],
-        #     "ssn_instance_size": os.environ['aws_ssn_instance_size'],
-        #     "edge_instance_size": os.environ['aws_edge_instance_size']
-        # }
-        # if os.environ['conf_duo_vpc_enable'] == 'true':
-        #     secondary_parameters = {
-        #         "aws_notebook_vpc_id": os.environ['aws_vpc2_id'],
-        #         "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
-        #         "aws_peering_id": os.environ['aws_peering_id']
-        #     }
-        # else:
-        #     secondary_parameters = {
-        #         "aws_notebook_vpc_id": os.environ['aws_vpc_id'],
-        #         "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
-        #     }
-        # mongo_parameters.update(secondary_parameters)
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "https://{0}/".format(get_instance_hostname(tag_name, instance_name))
+                'value': "https://{0}/".format(dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+                                                                                   ssn_conf['instance_name']))
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -586,14 +510,14 @@
                  "--resource_id {} " \
                  "--default_endpoint_name {} " \
                  "--tags {}". \
-            format(instance_hostname,
+            format(ssn_conf['instance_hostname'],
                    "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
                    os.environ['ssn_dlab_path'],
-                   dlab_ssh_user,
+                   ssn_conf['dlab_ssh_user'],
                    os.environ['conf_os_family'],
                    os.environ['request_id'],
                    os.environ['conf_resource'],
-                   service_base_name,
+                   ssn_conf['service_base_name'],
                    os.environ['conf_tag_resource_id'],
                    os.environ['conf_billing_tag'],
                    os.environ['conf_cloud_provider'],
@@ -601,7 +525,7 @@
                    os.environ['aws_billing_bucket'],
                    os.environ['aws_job_enabled'],
                    os.environ['aws_report_path'],
-                   billing_enabled,
+                   ssn_conf['billing_enabled'],
                    json.dumps(cloud_params),
                    os.environ['dlab_id'],
                    os.environ['usage_date'],
@@ -621,57 +545,36 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        append_result("Unable to configure UI.", str(err))
-        print(err)
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+        dlab.fab.append_result("Unable to configure UI.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         logging.info('[SUMMARY]')
         print('[SUMMARY]')
-        print("Service base name: {}".format(service_base_name))
-        print("SSN Name: {}".format(instance_name))
-        print("SSN Hostname: {}".format(instance_hostname))
-        print("Role name: {}".format(role_name))
-        print("Role profile name: {}".format(role_profile_name))
-        print("Policy name: {}".format(policy_name))
+        print("Service base name: {}".format(ssn_conf['service_base_name']))
+        print("SSN Name: {}".format(ssn_conf['instance_name']))
+        print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
+        print("Role name: {}".format(ssn_conf['role_name']))
+        print("Role profile name: {}".format(ssn_conf['role_profile_name']))
+        print("Policy name: {}".format(ssn_conf['policy_name']))
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC ID: {}".format(os.environ['aws_vpc_id']))
         print("Subnet ID: {}".format(os.environ['aws_subnet_id']))
         print("Security IDs: {}".format(os.environ['aws_security_groups_ids']))
         print("SSN instance shape: {}".format(os.environ['aws_ssn_instance_size']))
-        print("SSN AMI name: {}".format(ssn_image_name))
-        print("SSN bucket name: {}".format(ssn_bucket_name))
-        print("Shared bucket name: {}".format(shared_bucket_name))
-        print("Region: {}".format(region))
-        jenkins_url = "http://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
-        jenkins_url_https = "https://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
-        print("Jenkins URL: {}".format(jenkins_url))
-        print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(get_instance_hostname(tag_name, instance_name)))
-        print("DLab UI HTTPS URL: https://{}".format(get_instance_hostname(tag_name, instance_name)))
+        print("SSN AMI name: {}".format(ssn_conf['ssn_image_name']))
+        print("Region: {}".format(ssn_conf['region']))
+        ssn_conf['jenkins_url'] = "http://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name']))
+        ssn_conf['jenkins_url_https'] = "https://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name']))
+        print("Jenkins URL: {}".format(ssn_conf['jenkins_url']))
+        print("Jenkins URL HTTPS: {}".format(ssn_conf['jenkins_url_https']))
+        print("DLab UI HTTP URL: http://{}".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name'])))
+        print("DLab UI HTTPS URL: https://{}".format(dlab.meta_lib.get_instance_hostname(
+            ssn_conf['tag_name'], ssn_conf['instance_name'])))
         try:
             with open('jenkins_creds.txt') as f:
                 print(f.read())
@@ -679,26 +582,26 @@
             print("Jenkins is either configured already or have issues in configuration routine.")
 
         with open("/root/result.json", 'w') as f:
-            res = {"service_base_name": service_base_name,
-                   "instance_name": instance_name,
-                   "instance_hostname": get_instance_hostname(tag_name, instance_name),
-                   "role_name": role_name,
-                   "role_profile_name": role_profile_name,
-                   "policy_name": policy_name,
+            res = {"service_base_name": ssn_conf['service_base_name'],
+                   "instance_name": ssn_conf['instance_name'],
+                   "instance_hostname": dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+                                                                            ssn_conf['instance_name']),
+                   "role_name": ssn_conf['role_name'],
+                   "role_profile_name": ssn_conf['role_profile_name'],
+                   "policy_name": ssn_conf['policy_name'],
                    "master_keyname": os.environ['conf_key_name'],
                    "vpc_id": os.environ['aws_vpc_id'],
                    "subnet_id": os.environ['aws_subnet_id'],
                    "security_id": os.environ['aws_security_groups_ids'],
                    "instance_shape": os.environ['aws_ssn_instance_size'],
-                   "bucket_name": ssn_bucket_name,
-                   "shared_bucket_name": shared_bucket_name,
-                   "region": region,
+                   "region": ssn_conf['region'],
                    "action": "Create SSN instance"}
             f.write(json.dumps(res))
 
         print('Upload response file')
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
-            format(instance_name, local_log_filepath, dlab_ssh_user, instance_hostname)
+            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+                   ssn_conf['instance_hostname'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
 
         logging.info('[FINALIZE]')
@@ -707,28 +610,7 @@
         if os.environ['conf_lifecycle_stage'] == 'prod':
             params += "--key_id {}".format(os.environ['aws_access_key'])
             local("~/scripts/{}.py {}".format('ssn_finalize', params))
-    except:
-        if domain_created:
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
-        remove_ec2(tag_name, instance_name)
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
-            try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-            except:
-                print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
index 8615a25..45c65f2 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
@@ -21,333 +21,349 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import logging
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+import json
 
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
-    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
-    pre_defined_vpc = False
-    pre_defined_subnet = False
-    pre_defined_sg = False
-    pre_defined_vpc2 = False
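+    # Collect derived names and state flags in a single ssn_conf dictionary instead of loose module-level variables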
+    ssn_conf = dict()
+    ssn_conf['instance'] = 'ssn'
+    ssn_conf['pre_defined_vpc'] = False
+    ssn_conf['pre_defined_subnet'] = False
+    ssn_conf['pre_defined_sg'] = False
+    ssn_conf['pre_defined_vpc2'] = False
     try:
         logging.info('[CREATE AWS CONFIG FILE]')
         print('[CREATE AWS CONFIG FILE]')
         if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
-            create_aws_config_files(generate_full_config=True)
+            dlab.actions_lib.create_aws_config_files(generate_full_config=True)
         else:
-            create_aws_config_files()
+            dlab.actions_lib.create_aws_config_files()
     except Exception as err:
-        print('Error: {0}'.format(err))
         logging.info('Unable to create configuration')
-        append_result("Unable to create configuration")
+        dlab.fab.append_result("Unable to create configuration", err)
         traceback.print_exc()
         sys.exit(1)
 
     try:
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-        role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
-        role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
-        policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
-        default_endpoint_name = os.environ['default_endpoint_name']
-        tag_name = service_base_name + '-Tag'
-        tag2_name = service_base_name + '-secondary-Tag'
-        user_tag = "{0}:{0}-ssn-Role".format(service_base_name)
-        instance_name = service_base_name + '-ssn'
-        region = os.environ['aws_region']
-        zone_full = os.environ['aws_region'] + os.environ['aws_zone']
-        ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_ami_id = get_ami_id(ssn_image_name)
-        policy_path = '/root/files/ssn_policy.json'
-        vpc_cidr = os.environ['conf_vpc_cidr']
-        vpc2_cidr = os.environ['conf_vpc2_cidr']
-        vpc_name = '{}-VPC'.format(service_base_name)
-        vpc2_name = '{}-secondary-VPC'.format(service_base_name)
-        subnet_name = '{}-subnet'.format(service_base_name)
-        allowed_ip_cidr = list()
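+        # Naming scheme: the service base name is capped at 20 characters and resource name suffixes are lower-cased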
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+        ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+        ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['region'] = os.environ['aws_region']
+        ssn_conf['zone_full'] = os.environ['aws_region'] + os.environ['aws_zone']
+        ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['ssn_ami_id'] = dlab.meta_lib.get_ami_id(ssn_conf['ssn_image_name'])
+        ssn_conf['policy_path'] = '/root/files/ssn_policy.json'
+        ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        ssn_conf['vpc2_cidr'] = os.environ['conf_vpc2_cidr']
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc2_name'] = '{}-vpc2'.format(ssn_conf['service_base_name'])
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['allowed_ip_cidr'] = list()
         for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-            allowed_ip_cidr.append({"CidrIp": cidr.replace(' ','')})
-        sg_name = instance_name + '-sg'
-        network_type = os.environ['conf_network_type']
-        all_ip_cidr = '0.0.0.0/0'
-        elastic_ip_name = '{0}-ssn-EIP'.format(service_base_name)
+            ssn_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ', '')})
+        ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['network_type'] = os.environ['conf_network_type']
+        ssn_conf['all_ip_cidr'] = '0.0.0.0/0'
+        ssn_conf['elastic_ip_name'] = '{0}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
-        if get_instance_by_name(tag_name, instance_name):
-            print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name']):
+        print("Service base name should be unique and less or equal 20 symbols. Please try again.")
+        sys.exit(1)
+
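+    # An empty 'aws_vpc_id' is promoted to KeyError so that the handler below provisions a new VPC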
+    try:
+        if not os.environ['aws_vpc_id']:
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_vpc'] = True
+            logging.info('[CREATE VPC AND ROUTE TABLE]')
+            print('[CREATE VPC AND ROUTE TABLE]')
+            params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
+                ssn_conf['vpc_cidr'], ssn_conf['region'], ssn_conf['tag_name'], ssn_conf['service_base_name'],
+                ssn_conf['vpc_name'])
+            try:
+                local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'],
+                                                                    ssn_conf['service_base_name'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create VPC", str(err))
             sys.exit(1)
 
+    ssn_conf['allowed_vpc_cidr_ip_ranges'] = list()
+    for cidr in dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
+        ssn_conf['allowed_vpc_cidr_ip_ranges'].append({"CidrIp": cidr})
+
+    try:
+        if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
+            raise KeyError
+    except KeyError:
         try:
-            if not os.environ['aws_vpc_id']:
-                raise KeyError
-        except KeyError:
+            ssn_conf['pre_defined_vpc2'] = True
+            logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+            print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+            params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
+                     "--vpc_name {}".format(ssn_conf['vpc2_cidr'], ssn_conf['region'], ssn_conf['tag2_name'],
+                                            ssn_conf['service_base_name'], ssn_conf['vpc2_name'])
             try:
-                pre_defined_vpc = True
-                logging.info('[CREATE VPC AND ROUTE TABLE]')
-                print('[CREATE VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
-                    vpc_cidr, region, tag_name, service_base_name, vpc_name)
+                local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+                                                                     ssn_conf['service_base_name'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create secondary VPC.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            sys.exit(1)
+
+    try:
+        if os.environ['aws_subnet_id'] == '':
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_subnet'] = True
+            logging.info('[CREATE SUBNET]')
+            print('[CREATE SUBNET]')
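+            # The SSN subnet is carved out with a /20 prefix; public IP auto-assignment is enabled once the subnet ID is known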
+            params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
+                     "--ssn {5} --zone {6} --subnet_name {7}".format(
+                      os.environ['aws_vpc_id'], 'ssn', ssn_conf['tag_name'], ssn_conf['service_base_name'], '20',
+                      True, ssn_conf['zone_full'], ssn_conf['subnet_name'])
+            try:
+                local("~/scripts/{}.py {}".format('common_create_subnet', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            with open('/tmp/ssn_subnet_id', 'r') as f:
+                os.environ['aws_subnet_id'] = f.read()
+            dlab.actions_lib.enable_auto_assign_ip(os.environ['aws_subnet_id'])
+        except Exception as err:
+            dlab.fab.append_result("Failed to create Subnet.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
                 try:
-                    local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+                    dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create VPC. Exception:" + str(err))
-                sys.exit(1)
-
-        allowed_vpc_cidr_ip_ranges = list()
-        for cidr in get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
-            allowed_vpc_cidr_ip_ranges.append({"CidrIp": cidr})
-
-        try:
-            if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_vpc2 = True
-                logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
-                print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
-                params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
-                         "--vpc_name {}".format(vpc2_cidr, region, tag2_name, service_base_name, vpc2_name)
+                    print("Subnet hasn't been created.")
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
                 try:
-                    local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create secondary VPC. Exception:" + str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_route_tables(tag_name, True)
-                    remove_vpc(os.environ['aws_vpc_id'])
-                sys.exit(1)
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
 
+    try:
+        if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
+            raise KeyError
+    except KeyError:
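+        # In duo-VPC mode the two VPCs are peered and the SSN subnet gets a route to the secondary VPC CIDR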
         try:
-            if os.environ['aws_subnet_id'] == '':
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_subnet = True
-                logging.info('[CREATE SUBNET]')
-                print('[CREATE SUBNET]')
-                params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
-                         "--ssn {5} --zone {6} --subnet_name {7}".format(os.environ['aws_vpc_id'], 'ssn', tag_name,
-                                                             service_base_name, '20', True, zone_full, subnet_name)
+            logging.info('[CREATE PEERING CONNECTION]')
+            print('[CREATE PEERING CONNECTION]')
+            os.environ['aws_peering_id'] = dlab.actions_lib.create_peering_connection(
+                os.environ['aws_vpc_id'], os.environ['aws_vpc2_id'], ssn_conf['service_base_name'])
+            print('PEERING CONNECTION ID: ' + os.environ['aws_peering_id'])
+            dlab.actions_lib.create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'],
+                                                os.environ['aws_peering_id'],
+                                                dlab.meta_lib.get_cidr_by_vpc(os.environ['aws_vpc2_id']))
+        except Exception as err:
+            dlab.fab.append_result("Failed to create peering connection.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
                 try:
-                    local("~/scripts/{}.py {}".format('common_create_subnet', params))
+                    dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                with open('/tmp/ssn_subnet_id', 'r') as f:
-                    os.environ['aws_subnet_id'] = f.read()
-                enable_auto_assign_ip(os.environ['aws_subnet_id'])
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create Subnet.", str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_route_tables(tag_name, True)
-                    try:
-                        remove_subnets(service_base_name + "-subnet")
-                    except:
-                        print("Subnet hasn't been created.")
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
-
-        try:
-            if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
-                raise KeyError
-        except KeyError:
-            try:
-                logging.info('[CREATE PEERING CONNECTION]')
-                print('[CREATE PEERING CONNECTION]')
-                os.environ['aws_peering_id'] = create_peering_connection(os.environ['aws_vpc_id'],
-                                                                         os.environ['aws_vpc2_id'], service_base_name)
-                print('PEERING CONNECTION ID:' + os.environ['aws_peering_id'])
-                create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'], os.environ['aws_peering_id'],
-                                   get_cidr_by_vpc(os.environ['aws_vpc2_id']))
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed to create peering connection.", str(err))
-                if pre_defined_vpc:
-                    remove_route_tables(tag_name, True)
-                    try:
-                        remove_subnets(service_base_name + "-subnet")
-                    except:
-                        print("Subnet hasn't been created.")
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    remove_peering('*')
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
-
-        try:
-            if os.environ['aws_security_groups_ids'] == '':
-                raise KeyError
-        except KeyError:
-            try:
-                pre_defined_sg = True
-                logging.info('[CREATE SG FOR SSN]')
-                print('[CREATE SG FOR SSN]')
-                ingress_sg_rules_template = format_sg([
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 80,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 22,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 443,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": -1,
-                        "IpRanges": allowed_ip_cidr,
-                        "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 80,
-                        "IpRanges": allowed_vpc_cidr_ip_ranges,
-                        "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    },
-                    {
-                        "PrefixListIds": [],
-                        "FromPort": 443,
-                        "IpRanges": allowed_vpc_cidr_ip_ranges,
-                        "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
-                    }
-                ])
-                egress_sg_rules_template = format_sg([
-                    {"IpProtocol": "-1", "IpRanges": [{"CidrIp": all_ip_cidr}], "UserIdGroupPairs": [], "PrefixListIds": []}
-                ])
-                params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
-                         "--infra_tag_value {} --force {} --ssn {}". \
-                    format(sg_name, os.environ['aws_vpc_id'], json.dumps(ingress_sg_rules_template),
-                           json.dumps(egress_sg_rules_template), service_base_name, tag_name, False, True)
+                    print("Subnet hasn't been created.")
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    local("~/scripts/{}.py {}".format('common_create_security_group', params))
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
-                    traceback.print_exc()
-                    raise Exception
-                with open('/tmp/ssn_sg_id', 'r') as f:
-                    os.environ['aws_security_groups_ids'] = f.read()
-            except Exception as err:
-                print('Error: {0}'.format(err))
-                append_result("Failed creating security group for SSN.", str(err))
-                if pre_defined_vpc:
-                    remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                    remove_subnets(service_base_name + "-subnet")
-                    remove_route_tables(tag_name, True)
-                    remove_vpc(os.environ['aws_vpc_id'])
-                if pre_defined_vpc2:
-                    remove_peering('*')
-                    try:
-                        remove_vpc_endpoints(os.environ['aws_vpc2_id'])
-                    except:
-                        print("There are no VPC Endpoints")
-                    remove_route_tables(tag2_name, True)
-                    remove_vpc(os.environ['aws_vpc2_id'])
-                sys.exit(1)
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
+
+    try:
+        if os.environ['aws_security_groups_ids'] == '':
+            raise KeyError
+    except KeyError:
+        try:
+            ssn_conf['pre_defined_sg'] = True
+            logging.info('[CREATE SG FOR SSN]')
+            print('[CREATE SG FOR SSN]')
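+            # Ingress: HTTP, HTTPS, SSH and ICMP from the allowed CIDRs, plus HTTP/HTTPS from within the VPC; egress: all traffic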
+            ssn_conf['ingress_sg_rules_template'] = dlab.meta_lib.format_sg([
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 80,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 22,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 443,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": -1,
+                    "IpRanges": ssn_conf['allowed_ip_cidr'],
+                    "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 80,
+                    "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+                    "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                },
+                {
+                    "PrefixListIds": [],
+                    "FromPort": 443,
+                    "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+                    "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+                }
+            ])
+            egress_sg_rules_template = dlab.meta_lib.format_sg([
+                {"IpProtocol": "-1", "IpRanges": [{"CidrIp": ssn_conf['all_ip_cidr']}], "UserIdGroupPairs": [],
+                 "PrefixListIds": []}
+            ])
+            params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+                     "--infra_tag_value {} --force {} --ssn {}". \
+                format(ssn_conf['sg_name'], os.environ['aws_vpc_id'],
+                       json.dumps(ssn_conf['ingress_sg_rules_template']), json.dumps(egress_sg_rules_template),
+                       ssn_conf['service_base_name'], ssn_conf['tag_name'], False, True)
+            try:
+                local("~/scripts/{}.py {}".format('common_create_security_group', params))
+            except:
+                traceback.print_exc()
+                raise Exception
+            with open('/tmp/ssn_sg_id', 'r') as f:
+                os.environ['aws_security_groups_ids'] = f.read()
+        except Exception as err:
+            dlab.fab.append_result("Failed creating security group for SSN.", str(err))
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
+                try:
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                except:
+                    print("There are no VPC Endpoints")
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+            sys.exit(1)
+
+    try:
         logging.info('[CREATE ROLES]')
         print('[CREATE ROLES]')
         params = "--role_name {} --role_profile_name {} --policy_name {} --policy_file_name {} --region {} " \
                  "--infra_tag_name {} --infra_tag_value {} --user_tag_value {}".\
-            format(role_name, role_profile_name, policy_name, policy_path, os.environ['aws_region'], tag_name,
-                   service_base_name, user_tag)
+            format(ssn_conf['role_name'], ssn_conf['role_profile_name'], ssn_conf['policy_name'],
+                   ssn_conf['policy_path'], os.environ['aws_region'], ssn_conf['tag_name'],
+                   ssn_conf['service_base_name'], ssn_conf['user_tag'])
         try:
             local("~/scripts/{}.py {}".format('common_create_role_policy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create roles.", str(err))
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create roles.", str(err))
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
     try:
         logging.info('[CREATE ENDPOINT AND ROUTE-TABLE]')
         print('[CREATE ENDPOINT AND ROUTE-TABLE]')
         params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
-            os.environ['aws_vpc_id'], os.environ['aws_region'], tag_name, service_base_name)
+            os.environ['aws_vpc_id'], os.environ['aws_region'], ssn_conf['tag_name'], ssn_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create an endpoint.", str(err))
-        remove_all_iam_resources(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create an endpoint.", str(err))
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
     if os.environ['conf_duo_vpc_enable'] == 'true':
@@ -355,42 +371,44 @@
             logging.info('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
             print('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
             params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
-                os.environ['aws_vpc2_id'], os.environ['aws_region'], tag2_name, service_base_name)
+                os.environ['aws_vpc2_id'], os.environ['aws_region'], ssn_conf['tag2_name'],
+                ssn_conf['service_base_name'])
             try:
                 local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Unable to create secondary endpoint.", str(err))
-            remove_all_iam_resources(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.fab.append_result("Unable to create secondary endpoint.", str(err))
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
 
     try:
         logging.info('[CREATE SSN INSTANCE]')
         print('[CREATE SSN INSTANCE]')
-        params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} --subnet_id {5} " \
-                 "--iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} --primary_disk_size {10}".\
-            format(instance_name, ssn_ami_id, os.environ['aws_ssn_instance_size'], os.environ['conf_key_name'],
-                   os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
-                   role_profile_name, tag_name, instance_name, 'ssn', '20')
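+        # The SSN node is launched from the resolved AMI as instance class 'ssn' with a 20 GB primary disk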
+        params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} " \
+                 "--subnet_id {5} --iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} " \
+                 "--primary_disk_size {10}".\
+            format(ssn_conf['instance_name'], ssn_conf['ssn_ami_id'], os.environ['aws_ssn_instance_size'],
+                   os.environ['conf_key_name'], os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
+                   ssn_conf['role_profile_name'], ssn_conf['tag_name'], ssn_conf['instance_name'], 'ssn', '20')
 
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -398,107 +416,112 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create ssn instance.", str(err))
-        remove_all_iam_resources(instance)
-        remove_s3(instance)
-        if pre_defined_sg:
-            remove_sgroups(tag_name)
-        if pre_defined_subnet:
-            remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-            remove_subnets(service_base_name + "-subnet")
-        if pre_defined_vpc:
-            remove_vpc_endpoints(os.environ['aws_vpc_id'])
-            remove_route_tables(tag_name, True)
-            remove_vpc(os.environ['aws_vpc_id'])
-        if pre_defined_vpc2:
-            remove_peering('*')
+        dlab.fab.append_result("Unable to create ssn instance.", str(err))
+        dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+        dlab.actions_lib.remove_s3(ssn_conf['instance'])
+        if ssn_conf['pre_defined_sg']:
+            dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+        if ssn_conf['pre_defined_subnet']:
+            dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                      ssn_conf['service_base_name'])
+            dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+        if ssn_conf['pre_defined_vpc']:
+            dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+        if ssn_conf['pre_defined_vpc2']:
+            dlab.actions_lib.remove_peering('*')
             try:
-                remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
             except:
                 print("There are no VPC Endpoints")
-            remove_route_tables(tag2_name, True)
-            remove_vpc(os.environ['aws_vpc2_id'])
+            dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+            dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
         sys.exit(1)
 
-    if network_type == 'public':
+    if ssn_conf['network_type'] == 'public':
         try:
             logging.info('[ASSOCIATING ELASTIC IP]')
             print('[ASSOCIATING ELASTIC IP]')
-            ssn_id = get_instance_by_name(tag_name, instance_name)
+            ssn_conf['ssn_id'] = dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name'])
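+            # Use the pre-allocated Elastic IP from ssn_elastic_ip when it is set, 'None' otherwise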
             try:
-                elastic_ip = os.environ['ssn_elastic_ip']
+                ssn_conf['elastic_ip'] = os.environ['ssn_elastic_ip']
             except:
-                elastic_ip = 'None'
+                ssn_conf['elastic_ip'] = 'None'
             params = "--elastic_ip {} --ssn_id {} --infra_tag_name {} --infra_tag_value {}".format(
-                elastic_ip, ssn_id, tag_name, elastic_ip_name)
+                ssn_conf['elastic_ip'], ssn_conf['ssn_id'], ssn_conf['tag_name'], ssn_conf['elastic_ip_name'])
             try:
                 local("~/scripts/{}.py {}".format('ssn_associate_elastic_ip', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to associate elastic ip.", str(err))
-            remove_ec2(tag_name, instance_name)
-            remove_all_iam_resources(instance)
-            remove_s3(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_vpc_endpoints(os.environ['aws_vpc_id'])
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.fab.append_result("Failed to associate elastic ip.", str(err))
+            dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            dlab.actions_lib.remove_s3(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
 
-    if network_type == 'private':
-        instance_ip = get_instance_ip_address(tag_name, instance_name).get('Private')
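+    # Pick the SSN address used for further configuration: private IP on private networks, public IP otherwise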
+    if ssn_conf['network_type'] == 'private':
+        ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                                        ssn_conf['instance_name']).get('Private')
     else:
-        instance_ip = get_instance_ip_address(tag_name, instance_name).get('Public')
+        ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+                                                                        ssn_conf['instance_name']).get('Public')
 
     if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
         try:
             logging.info('[CREATING ROUTE53 RECORD]')
             print('[CREATING ROUTE53 RECORD]')
             try:
-                create_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                                       os.environ['ssn_subdomain'], instance_ip)
+                dlab.actions_lib.create_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                        os.environ['ssn_hosted_zone_name'],
+                                                        os.environ['ssn_subdomain'], ssn_conf['instance_ip'])
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            append_result("Failed to create route53 record.", str(err))
-            remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+            dlab.fab.append_result("Failed to create route53 record.", str(err))
+            dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+                                                    os.environ['ssn_hosted_zone_name'],
-                                   os.environ['ssn_subdomain'])
+                                                    os.environ['ssn_subdomain'])
-            remove_ec2(tag_name, instance_name)
-            remove_all_iam_resources(instance)
-            remove_s3(instance)
-            if pre_defined_sg:
-                remove_sgroups(tag_name)
-            if pre_defined_subnet:
-                remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
-                remove_subnets(service_base_name + "-subnet")
-            if pre_defined_vpc:
-                remove_vpc_endpoints(os.environ['aws_vpc_id'])
-                remove_route_tables(tag_name, True)
-                remove_vpc(os.environ['aws_vpc_id'])
-            if pre_defined_vpc2:
-                remove_peering('*')
+            dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+            dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+            dlab.actions_lib.remove_s3(ssn_conf['instance'])
+            if ssn_conf['pre_defined_sg']:
+                dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+            if ssn_conf['pre_defined_subnet']:
+                dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+                                                          ssn_conf['service_base_name'])
+                dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+            if ssn_conf['pre_defined_vpc']:
+                dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+            if ssn_conf['pre_defined_vpc2']:
+                dlab.actions_lib.remove_peering('*')
                 try:
-                    remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+                    dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
                 except:
                     print("There are no VPC Endpoints")
-                remove_route_tables(tag2_name, True)
-                remove_vpc(os.environ['aws_vpc2_id'])
+                dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+                dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
             sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
index 76a119d..975e8d3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
@@ -21,11 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import sys
+import os
+import logging
+import traceback
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import json
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -35,17 +40,17 @@
                         filename=local_log_filepath)
     # generating variables dictionary
     if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
-        create_aws_config_files(generate_full_config=True)
+        dlab.actions_lib.create_aws_config_files(generate_full_config=True)
     else:
-        create_aws_config_files()
+        dlab.actions_lib.create_aws_config_files()
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-Tag'
+    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
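+    # Names and wildcard patterns below locate the resources created under this service base name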
+    ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-tag'
     ssn_conf['edge_sg'] = ssn_conf['service_base_name'] + "*" + '-edge'
     ssn_conf['nb_sg'] = ssn_conf['service_base_name'] + "*" + '-nb'
-    ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-dataengine*'
+    ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-de*'
     ssn_conf['de-service_sg'] = ssn_conf['service_base_name'] + "*" + '-des-*'
 
     try:
@@ -61,7 +66,7 @@
             raise Exception
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -70,6 +75,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
index 7aa6629..27b5913 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
@@ -21,12 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
 import boto3
 import argparse
 import sys
-from dlab.ssn_lib import *
 import os
 
 parser = argparse.ArgumentParser()
@@ -37,7 +38,7 @@
 parser.add_argument('--service_base_name', type=str)
 parser.add_argument('--de_se_sg', type=str)
 args = parser.parse_args()
-tag2 = args.service_base_name + '-secondary-Tag'
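+# Tag name of the secondary (notebook) VPC created alongside the SSN VPC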
+tag2 = args.service_base_name + '-secondary-tag'
 
 ##############
 # Run script #
@@ -46,120 +47,129 @@
 if __name__ == "__main__":
     print('Terminating EMR cluster')
     try:
-        clusters_list = get_emr_list(args.tag_name)
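+        # Look up every EMR cluster tagged for this environment and terminate each one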
+        clusters_list = dlab.meta_lib.get_emr_list(args.tag_name)
         if clusters_list:
             for cluster_id in clusters_list:
                 client = boto3.client('emr')
                 cluster = client.describe_cluster(ClusterId=cluster_id)
                 cluster = cluster.get("Cluster")
                 emr_name = cluster.get('Name')
-                terminate_emr(cluster_id)
+                dlab.actions_lib.terminate_emr(cluster_id)
                 print("The EMR cluster {} has been terminated successfully".format(emr_name))
         else:
             print("There are no EMR clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
         sys.exit(1)
 
     print("Deregistering notebook's AMI")
     try:
-        deregister_image()
-    except:
+        dlab.actions_lib.deregister_image()
+    except Exception as err:
+        dlab.fab.append_result("Failed to deregister images.", str(err))
         sys.exit(1)
 
     print("Terminating EC2 instances")
     try:
-        remove_ec2(args.tag_name, '*')
-    except:
+        dlab.actions_lib.remove_ec2(args.tag_name, '*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate instances.", str(err))
         sys.exit(1)
 
     if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
         print("Removing Route53 records")
-        remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
-                               os.environ['ssn_subdomain'])
+        dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+                                                os.environ['ssn_subdomain'])
 
     print("Removing security groups")
     try:
-        remove_sgroups(args.de_se_sg)
-        remove_sgroups(args.de_sg)
-        remove_sgroups(args.nb_sg)
-        remove_sgroups(args.edge_sg)
+        dlab.actions_lib.remove_sgroups(args.de_se_sg)
+        dlab.actions_lib.remove_sgroups(args.de_sg)
+        dlab.actions_lib.remove_sgroups(args.nb_sg)
+        dlab.actions_lib.remove_sgroups(args.edge_sg)
         try:
-            remove_sgroups(args.tag_name)
+            dlab.actions_lib.remove_sgroups(args.tag_name)
         except:
             print("There is no pre-defined SSN SG")
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove security groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        remove_subnets('*')
-    except:
+        dlab.actions_lib.remove_subnets('*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
 
     print("Removing peering connection")
     try:
-        remove_peering('*')
-    except:
+        dlab.actions_lib.remove_peering('*')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove peering connections.", str(err))
         sys.exit(1)
 
     print("Removing s3 buckets")
     try:
-        remove_s3()
-    except:
+        dlab.actions_lib.remove_s3()
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove buckets.", str(err))
         sys.exit(1)
 
     print("Removing IAM roles, profiles and policies")
     try:
-        remove_all_iam_resources('all')
-    except:
+        dlab.actions_lib.remove_all_iam_resources('all')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove IAM roles, profiles and policies.", str(err))
         sys.exit(1)
 
     print("Removing route tables")
     try:
-        remove_route_tables(args.tag_name)
-        remove_route_tables(tag2)
-    except:
+        dlab.actions_lib.remove_route_tables(args.tag_name)
+        dlab.actions_lib.remove_route_tables(tag2)
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove route tables.", str(err))
         sys.exit(1)
 
     print("Removing SSN subnet")
     try:
-        remove_subnets(args.service_base_name + '-subnet')
-    except:
-        print("There is no pre-defined SSN Subnet")
+        dlab.actions_lib.remove_subnets(args.service_base_name + '-subnet')
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove SSN subnet.", str(err))
+        sys.exit(1)
 
     print("Removing SSN VPC")
     try:
-        vpc_id = get_vpc_by_tag(args.tag_name, args.service_base_name)
+        vpc_id = dlab.meta_lib.get_vpc_by_tag(args.tag_name, args.service_base_name)
         if vpc_id != '':
             try:
-                remove_vpc_endpoints(vpc_id)
+                dlab.actions_lib.remove_vpc_endpoints(vpc_id)
             except:
                 print("There is no such VPC Endpoint")
             try:
-                remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
+                dlab.actions_lib.remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
             except:
                 print("There is no such Internet gateway")
-            remove_route_tables(args.tag_name, True)
-            remove_vpc(vpc_id)
+            dlab.actions_lib.remove_route_tables(args.tag_name, True)
+            dlab.actions_lib.remove_vpc(vpc_id)
         else:
             print("There is no pre-defined SSN VPC")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove SSN VPC.", str(err))
         sys.exit(1)
 
     print("Removing notebook VPC")
     try:
-        vpc_id = get_vpc_by_tag(tag2, args.service_base_name)
+        vpc_id = dlab.meta_lib.get_vpc_by_tag(tag2, args.service_base_name)
         if vpc_id != '':
             try:
-                remove_vpc_endpoints(vpc_id)
+                dlab.actions_lib.remove_vpc_endpoints(vpc_id)
             except:
                 print("There is no such VPC Endpoint")
-            remove_route_tables(tag2, True)
-            remove_vpc(vpc_id)
+            dlab.actions_lib.remove_route_tables(tag2, True)
+            dlab.actions_lib.remove_vpc(vpc_id)
         else:
             print("There is no pre-defined notebook VPC")
     except Exception as err:
-        print('Error: {0}'.format(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to remove wecondary VPC.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
index a10ccae..6baaf45 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,74 +46,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'],
-                                                            notebook_config['instance_name']).get('Private')
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    notebook_config['rstudio_pass'] = id_generator()
+        notebook_config = dict()
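+        # exploratory_name is optional; fall back to an empty string when the env var is absent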
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
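+        # Generate a random password for the RStudio web UI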
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -119,9 +125,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -138,9 +143,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -156,9 +160,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -168,7 +171,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, keyfile_name,
                     os.environ['aws_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -180,9 +183,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure tensoflow-rstudio.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure tensoflow-rstudio.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -198,9 +200,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -211,12 +212,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -232,9 +232,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -245,112 +244,121 @@
             'tensor': True
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
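+                    # Append project and endpoint tags to any user-supplied conf_additional_tags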
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    rstudio_dns_url = "http://" + dns_name + ":8787/"
-    rstudio_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio URL: {}".format(rstudio_dns_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
-        notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
-        notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        rstudio_dns_url = "http://" + dns_name + ":8787/"
+        rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio URL: {}".format(rstudio_dns_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_access_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
index ebcb814..3cf3a46 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,72 +46,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    tag = {"Key": notebook_config['tag_name'],
-           "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
-    notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config = dict()
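+        # exploratory_name may be absent from the environment; default to an empty string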
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
 
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -117,9 +124,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -136,9 +142,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -154,9 +159,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -165,7 +169,7 @@
         print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
         params = "--hostname {0} --keyfile {1} " \
                  "--region {2} --os_user {3} " \
-                 "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+                 "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
                  .format(instance_hostname, keyfile_name,
                          os.environ['aws_region'], notebook_config['dlab_ssh_user'],
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_ip)
@@ -175,9 +179,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -193,9 +196,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,12 +208,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -227,9 +228,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -240,106 +240,115 @@
             'tensor': True
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
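+                    # extend any pre-set conf_additional_tags with project and endpoint tags for this image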
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                              notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+                                                                notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                 notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
-        notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
-        notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
index 8e8e94b..dbdae70 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
@@ -24,12 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -44,77 +46,84 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name']
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower()[:12], '-', True)
-    notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['user_keyname'] = os.environ['project_name']
-    notebook_config['network_type'] = os.environ['conf_network_type']
-    notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
-                                                               os.environ['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'], args.uuid)
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if os.environ['conf_shared_image_enabled'] == 'false':
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            os.environ['endpoint_name'],
-            os.environ['application'])
-    notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-    notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
-        .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                  os.environ['project_name'], os.environ['endpoint_name'])
-    notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-
-    region = os.environ['aws_region']
-    if region == 'us-east-1':
-        endpoint_url = 'https://s3.amazonaws.com'
-    elif region == 'cn-north-1':
-        endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
-    else:
-        endpoint_url = 'https://s3-{}.amazonaws.com'.format(region)
-
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
-                                                os.environ['project_name'], os.environ['endpoint_name'])
-    edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
-    edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    if notebook_config['network_type'] == 'private':
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
-    else:
-        edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
-    keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['network_type'] = os.environ['conf_network_type']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+                                                                      notebook_config['project_name'],
+                                                                      notebook_config['endpoint_name'],
+                                                                      notebook_config['exploratory_name'], args.uuid)
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if os.environ['conf_shared_image_enabled'] == 'false':
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        else:
+            notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
+        notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+        notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+            notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+        notebook_config['region'] = os.environ['aws_region']
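+        # choose the S3 endpoint for the region: us-east-1 uses the global endpoint, cn-north-1 the China (.com.cn) partition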
+        if notebook_config['region'] == 'us-east-1':
+            notebook_config['endpoint_url'] = 'https://s3.amazonaws.com'
+        elif notebook_config['region'] == 'cn-north-1':
+            notebook_config['endpoint_url'] = "https://s3.{}.amazonaws.com.cn".format(notebook_config['region'])
+        else:
+            notebook_config['endpoint_url'] = 'https://s3-{}.amazonaws.com'.format(notebook_config['region'])
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                notebook_config['instance_name'])
+        edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+                                                    notebook_config['project_name'], notebook_config['endpoint_name'])
+        edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+        edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                                         edge_instance_name).get('Private')
+        notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                        edge_instance_name)
+        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -122,9 +130,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -141,9 +148,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -160,8 +166,7 @@
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -169,7 +175,8 @@
         logging.info('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
         print('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
         additional_config = {"frontend_hostname": edge_instance_hostname,
-                             "backend_hostname": get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name']),
+                             "backend_hostname": dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+                                                                                     notebook_config['instance_name']),
                              "backend_port": "8080",
                              "nginx_template_dir": "/root/templates/"}
         params = "--hostname {0} --instance_name {1} " \
@@ -180,13 +187,13 @@
                  "--zeppelin_version {10} --scala_version {11} " \
                  "--livy_version {12} --multiple_clusters {13} " \
                  "--r_mirror {14} --endpoint_url {15} " \
-                 "--ip_adress {16} --exploratory_name {17} --edge_ip {18}" \
+                 "--ip_address {16} --exploratory_name {17} --edge_ip {18}" \
             .format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['aws_region'],
                     json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
                     os.environ['notebook_hadoop_version'], edge_instance_hostname, '3128',
                     os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
                     os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
-                    os.environ['notebook_r_mirror'], endpoint_url, notebook_config['ip_address'],
+                    os.environ['notebook_r_mirror'], notebook_config['endpoint_url'], notebook_config['ip_address'],
                     notebook_config['exploratory_name'], edge_ip)
         try:
             local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
@@ -194,9 +201,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -212,9 +218,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -225,12 +230,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
     
     try:
@@ -246,9 +250,8 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -259,99 +262,107 @@
             'tensor': False
         }
         params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
-            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin', notebook_config['exploratory_name'], json.dumps(additional_info))
+            .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin',
+                    notebook_config['exploratory_name'], json.dumps(additional_info))
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING AMI]')
-            ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+            ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
             if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
                 print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
-                        os.environ['project_name'], os.environ['endpoint_name'])
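+                    # extend any pre-set conf_additional_tags with project and endpoint tags for this image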
+                    os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+                        os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
                         os.environ['project_name'], os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
             else:
+                print("Looks like it's first time we configure notebook server. Creating image.")
                 try:
-                    os.environ['conf_additional_tags'] = os.environ[
-                                                             'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
-                        os.environ['endpoint_name'])
+                    os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+                        os.environ['conf_additional_tags'], os.environ['endpoint_name'])
                 except KeyError:
                     os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
                         os.environ['endpoint_name'])
-                image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
-                                                      instance_name=notebook_config['instance_name'],
-                                                      image_name=notebook_config['expected_image_name'])
+                image_id = dlab.actions_lib.create_image_from_instance(
+                    tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+                    image_name=notebook_config['expected_image_name'])
                 if image_id != '':
                     print("Image was successfully created. It's ID is {}".format(image_id))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
             sys.exit(1)
+    try:
+        # generating output information
+        ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+                                                           notebook_config['instance_name']).get('Private')
+        dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+        zeppelin_ip_url = "http://" + ip_address + ":8080/"
+        zeppelin_dns_url = "http://" + dns_name + ":8080/"
+        zeppelin_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+                                                               notebook_config['exploratory_name'])
+        zeppelin_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+                                                                  notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private DNS: {}".format(dns_name))
+        print("Private IP: {}".format(ip_address))
+        print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                          notebook_config['instance_name'])))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['user_keyname']))
+        print("AMI name: {}".format(notebook_config['notebook_image_name']))
+        print("Profile name: {}".format(notebook_config['role_profile_name']))
+        print("SG name: {}".format(notebook_config['security_group_name']))
+        print("Zeppelin URL: {}".format(zeppelin_ip_url))
+        print("Zeppelin URL: {}".format(zeppelin_dns_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+        print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+              format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
 
-    # generating output information
-    ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-    dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
-    zeppelin_ip_url = "http://" + ip_address + ":8080/"
-    zeppelin_dns_url = "http://" + dns_name + ":8080/"
-    zeppelin_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
-    zeppelin_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private DNS: {}".format(dns_name))
-    print("Private IP: {}".format(ip_address))
-    print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(notebook_config['user_keyname']))
-    print("AMI name: {}".format(notebook_config['notebook_image_name']))
-    print("Profile name: {}".format(notebook_config['role_profile_name']))
-    print("SG name: {}".format(notebook_config['security_group_name']))
-    print("Zeppelin URL: {}".format(zeppelin_ip_url))
-    print("Zeppelin URL: {}".format(zeppelin_dns_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
-    print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
-          format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": dns_name,
-               "ip": ip_address,
-               "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "notebook_image_name": notebook_config['notebook_image_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Apache Zeppelin",
-                    "url": zeppelin_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": zeppelin_ungit_access_url}#,
-                   #{"description": "Apache Zeppelin (via tunnel)",
-                   # "url": zeppelin_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": dns_name,
+                   "ip": ip_address,
+                   "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+                                                                     notebook_config['instance_name']),
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "notebook_image_name": notebook_config['notebook_image_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Apache Zeppelin",
+                        "url": zeppelin_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": zeppelin_ungit_access_url}#,
+                       #{"description": "Apache Zeppelin (via tunnel)",
+                       # "url": zeppelin_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
index cb7073a..295e191 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
@@ -82,7 +82,7 @@
                                                                             args.security_group_name,
                                                                             json.loads(args.tags),
                                                                             args.public_ip_name)
-                disk = AzureMeta().get_disk(args.resource_group_name, '{}-disk0'.format(
+                disk = AzureMeta().get_disk(args.resource_group_name, '{}-volume-primary'.format(
                     args.instance_name))
                 if disk:
                     create_option = 'attach'
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
index c3024c5..dbd3988 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
@@ -21,31 +21,38 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import traceback
 import sys
 import json
+from fabric.api import *
 
 
 if __name__ == "__main__":
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
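+        # instantiate AzureMeta/AzureActions once and reuse them instead of constructing a client per call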
         image_conf = dict()
         image_conf['service_base_name'] = os.environ['conf_service_base_name']
         image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-        image_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        image_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        image_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
-        image_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-')
-        image_conf['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
+        image_conf['user_name'] = os.environ['edge_user_name']
+        image_conf['project_name'] = os.environ['project_name']
+        image_conf['project_tag'] = image_conf['project_name']
+        image_conf['endpoint_name'] = os.environ['endpoint_name']
+        image_conf['endpoint_tag'] = image_conf['endpoint_name']
         image_conf['instance_name'] = os.environ['notebook_instance_name']
         image_conf['application'] = os.environ['application']
         image_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-        image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
-        image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
-                                                             image_conf['project_name'],
-                                                             image_conf['application'],
-                                                             image_conf['image_name']).lower()
+        image_conf['image_name'] = os.environ['notebook_image_name']
+        image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+                                                                image_conf['project_name'],
+                                                                image_conf['endpoint_name'],
+                                                                image_conf['application'],
+                                                                image_conf['image_name'])
         image_conf['tags'] = {"Name": image_conf['service_base_name'],
                               "SBN": image_conf['service_base_name'],
                               "User": image_conf['user_name'],
@@ -55,42 +61,42 @@
                               "FIN": image_conf['full_image_name'],
                               os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
 
-        instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
-                                                               image_conf['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+                                                             image_conf['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(image_conf['service_base_name'],
                                                        image_conf['project_name'],
                                                        image_conf['endpoint_name'])
-        edge_instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+                                                                  edge_instance_name)
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
 
-        instance = AzureMeta().get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+        instance = AzureMeta.get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
         os.environ['azure_notebook_instance_size'] = instance.hardware_profile.vm_size
         os.environ['exploratory_name'] = instance.tags['Exploratory']
         os.environ['notebook_image_name'] = image_conf['image_name']
 
-        image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+        image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
         if image == '':
             print('Creating image from existing notebook.')
-            prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
-            AzureActions().create_image_from_instance(image_conf['resource_group_name'],
-                                                      image_conf['instance_name'],
-                                                      os.environ['azure_region'],
-                                                      image_conf['full_image_name'],
-                                                      json.dumps(image_conf['tags']))
+            dlab.actions_lib.prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+            AzureActions.create_image_from_instance(image_conf['resource_group_name'],
+                                                    image_conf['instance_name'],
+                                                    os.environ['azure_region'],
+                                                    image_conf['full_image_name'],
+                                                    json.dumps(image_conf['tags']))
             print("Image was successfully created.")
             try:
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(image_conf['resource_group_name'],
-                                                       image_conf['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(image_conf['resource_group_name'],
+                                                     image_conf['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
-                                                                       image_conf['instance_name'])
-                remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+                                                                     image_conf['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+                dlab.fab.set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_hostname))
                 additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, image_conf['instance_name'], keyfile_name,
@@ -98,9 +104,8 @@
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
                 print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
             except Exception as err:
-                print('Error: {0}'.format(err))
-                AzureActions().remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
-                append_result("Failed to create instance from image.", str(err))
+                AzureActions.remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+                dlab.fab.append_result("Failed to create instance from image.", str(err))
                 sys.exit(1)
 
             with open("/root/result.json", 'w') as result:
@@ -114,6 +119,5 @@
                        "Action": "Create image from notebook"}
                 result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create image from notebook", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create image from notebook", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
index cfe37fc..2a9e606 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
@@ -35,9 +35,9 @@
 args = parser.parse_args()
 
 resource_group_name = os.environ['azure_resource_group_name']
-ssn_storage_account_tag = ('{0}-{1}-{2}-storage'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
-                                                        os.environ['endpoint_name']))
-container_name = ('{}-ssn-container'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
+ssn_storage_account_tag = ('{0}-{1}-{2}-bucket'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
+                                                       os.environ['endpoint_name']))
+container_name = ('{}-ssn-bucket'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
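+# Azure container names must be lower-case and must not contain underscores, hence the normalization above.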
 gitlab_certfile = os.environ['conf_gitlab_certfile']
 
 if __name__ == "__main__":
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
index b755c91..2e697eb 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
+from fabric.api import *
+import traceback
+
+
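+# Rollback helper: removes the slave instances first, then the master node, so a
+# failed configuration step does not leave orphaned VMs behind.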
+def clear_resources():
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        AzureActions.remove_instance(notebook_config['resource_group_name'], slave_name)
+    AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
 
 
 if __name__ == "__main__":
@@ -41,48 +50,50 @@
 
     try:
         # generating variables dictionary
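+        # Shared Azure clients: AzureMeta for lookups, AzureActions for mutating calls,
+        # created once instead of on every call as before.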
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
-            notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            notebook_config['computational_name'] = os.environ['computational_name']
+        else:
             notebook_config['computational_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['region'] = os.environ['azure_region']
-        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
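+        # Cluster name layout: <service_base_name>-<project>-<endpoint>-de-<computational_name>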
+        notebook_config['cluster_name'] = '{}-{}-{}-de-{}'.format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['spark_master_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get instance IP address", str(err))
+            clear_resources()
             sys.exit(1)
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
@@ -100,12 +111,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -125,12 +132,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -139,6 +142,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
index d5be204..a4dda9d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
 from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -42,17 +44,19 @@
 
     # generating variables dictionary
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['application'] = os.environ['application'].lower().replace('_', '-')
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['application'] = os.environ['application'].lower()
         
         print('Generating infrastructure names and tags')
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -62,8 +66,9 @@
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
                                    "User": notebook_config['user_name'],
@@ -73,9 +78,11 @@
                                    "product": "dlab"}
         notebook_config['network_interface_name'] = notebook_config['instance_name'] + "-nif"
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
-        notebook_config['private_subnet_name'] = '{}-{}-subnet'.format(notebook_config['service_base_name'],
-                                                                       notebook_config['project_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
+        notebook_config['private_subnet_name'] = '{}-{}-{}-subnet'.format(notebook_config['service_base_name'],
+                                                                          notebook_config['project_name'],
+                                                                          notebook_config['endpoint_name'])
         ssh_key_path = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
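+        # Load the user's private key and export its public half in OpenSSH format
+        # for provisioning the notebook VM.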
         key = RSA.importKey(open(ssh_key_path, 'rb').read())
         notebook_config['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -83,33 +90,32 @@
         notebook_config['instance_storage_account_type'] = (lambda x: 'Standard_LRS' if x in ('deeplearning', 'tensor')
                                                             else 'Premium_LRS')(os.environ['application'])
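+        # i.e. deeplearning/tensor notebooks get Standard_LRS disks, all other applications Premium_LRS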
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
         notebook_config['image_type'] = 'default'
 
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['project_name'],
-            notebook_config['application'])
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                notebook_config['application'])
         else:
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['application'])
-        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
-                                                                                 os.environ['project_name'],
-                                                                                 os.environ['application'],
-                                                                                 os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
+                notebook_config['service_base_name'],
+                notebook_config['endpoint_name'],
+                notebook_config['application'])
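+        # If a custom notebook_image_name is supplied, expand it into the full
+        # '<SBN>-<project>-<endpoint>-<application>-<name>' form; otherwise fall back
+        # to the expected image name built above.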
+        notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+            notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+            os.environ['application'], os.environ['notebook_image_name']).replace('_', '-') if (x != 'None' and x != '')
             else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
         print('Searching pre-configured images')
         notebook_config['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
-        if AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
+        if AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
             notebook_config['image_name'] = notebook_config['notebook_image_name']
             notebook_config['image_type'] = 'pre-configured'
             print('Pre-configured image found. Using: {}'.format(notebook_config['notebook_image_name']))
@@ -118,27 +124,26 @@
             print('No pre-configured image found. Using default one: {}'.format(notebook_config['image_name']))
     except Exception as err:
         print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
-        edge_status = AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                      '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
-                                                                                notebook_config['project_name'],
-                                                                                notebook_config['endpoint_name']))
+        edge_status = AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                    '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
+                                                                              notebook_config['project_name'],
+                                                                              notebook_config['endpoint_name']))
 
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                              os.environ['conf_service_base_name'] + '-ssn')
+            ssn_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                            os.environ['conf_service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
     except Exception as err:
-        print("Failed to verify edge status.")
-        append_result("Failed to verify edge status.", str(err))
+        dlab.fab.append_result("Failed to verify edge status.", str(err))
         sys.exit(1)
 
     with open('/root/result.json', 'w') as f:
@@ -157,20 +162,20 @@
             format(notebook_config['instance_name'], notebook_config['instance_size'], notebook_config['region'],
                    notebook_config['vpc_name'], notebook_config['network_interface_name'],
                    notebook_config['security_group_name'], notebook_config['private_subnet_name'],
-                   notebook_config['service_base_name'], notebook_config['resource_group_name'], initial_user,
-                   'None', notebook_config['public_ssh_key'], notebook_config['primary_disk_size'], 'notebook',
-                   notebook_config['project_name'], notebook_config['instance_storage_account_type'],
-                   notebook_config['image_name'], notebook_config['image_type'], json.dumps(notebook_config['tags']))
+                   notebook_config['service_base_name'], notebook_config['resource_group_name'],
+                   notebook_config['initial_user'], 'None', notebook_config['public_ssh_key'],
+                   notebook_config['primary_disk_size'], 'notebook', notebook_config['project_name'],
+                   notebook_config['instance_storage_account_type'], notebook_config['image_name'],
+                   notebook_config['image_type'], json.dumps(notebook_config['tags']))
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         except:
             print("The instance hasn't been created.")
-        append_result("Failed to create instance.", str(err))
+        dlab.fab.append_result("Failed to create instance.", str(err))
         sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
index 5dcbf3e..ab3c080 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,6 +42,8 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
     notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -51,37 +55,37 @@
         print('[START NOTEBOOK]')
         try:
             print("Starting notebook")
-            AzureActions().start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
+            AzureActions.start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
             print("Instance {} has been started".format(notebook_config['notebook_name']))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to start notebook.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+        notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
             notebook_config['resource_group_name'], notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
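+        # With Data Lake enabled, push a fresh OAuth refresh token to the notebook VM on every start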
         try:
             logging.info('[UPDATE STORAGE CREDENTIALS]')
             print('[UPDATE STORAGE CREDENTIALS]')
-            notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+            notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
                 notebook_config['resource_group_name'], notebook_config['notebook_name'])
             env.hosts = "{}".format(notebook_config['notebook_ip'])
             env.user = os.environ['conf_os_user']
@@ -90,13 +94,14 @@
             params = '--refresh_token {}'.format(os.environ['azure_user_refresh_token'])
             try:
                 put('~/scripts/common_notebook_update_refresh_token.py', '/tmp/common_notebook_update_refresh_token.py')
-                sudo('mv /tmp/common_notebook_update_refresh_token.py /usr/local/bin/common_notebook_update_refresh_token.py')
+                sudo('mv /tmp/common_notebook_update_refresh_token.py '
+                     '/usr/local/bin/common_notebook_update_refresh_token.py')
                 sudo("/usr/bin/python /usr/local/bin/{}.py {}".format('common_notebook_update_refresh_token', params))
-            except Exception as err:
+            except:
                 traceback.print_exc()
-                append_result("Failed to update storage credentials.", str(err))
                 raise Exception
-        except:
+        except Exception as err:
+            dlab.fab.append_result("Failed to update storage credentials.", str(err))
             sys.exit(1)
 
     try:
@@ -106,16 +111,16 @@
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
         try:
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
             raise Exception
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Failed to update last activity time.", str(err))
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                 notebook_config['notebook_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['notebook_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -126,8 +131,8 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
 
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
index 4c4ba17..5e77666 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
@@ -24,9 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
 import argparse
@@ -37,26 +37,26 @@
     print("Stopping data engine cluster")
     cluster_list = []
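+    # Stop every VM whose 'notebook_name' tag matches this notebook; master node
+    # names are collected into cluster_list along the way.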
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "notebook_name" in vm.tags:
                 if notebook_name == vm.tags['notebook_name']:
                     if 'master' == vm.tags["Type"]:
                         cluster_list.append(vm.tags["Name"])
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop clusters", str(err))
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if notebook_name == vm.tags["Name"]:
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop instance", str(err))
         sys.exit(1)
 
 
@@ -69,15 +69,17 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        notebook_config['exploratory_name'] = os.environ['exploratory_name']
+    else:
         notebook_config['exploratory_name'] = ''
-    try:
-        notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        notebook_config['computational_name'] = os.environ['computational_name']
+    else:
         notebook_config['computational_name'] = ''
     notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -87,18 +89,15 @@
     try:
         stop_notebook(notebook_config['resource_group_name'], notebook_config['notebook_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"notebook_name": notebook_config['notebook_name'],
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
index e08130d..73eab17 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
@@ -24,34 +24,35 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
+import traceback
 
 
 def terminate_nb(resource_group_name, notebook_name):
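+    # Removes every data engine VM tagged with this notebook's name, then the notebook VM itself.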
     print("Terminating data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "notebook_name" in vm.tags:
                 if notebook_name == vm.tags['notebook_name']:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate clusters", str(err))
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if notebook_name == vm.tags["Name"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
 
@@ -63,15 +64,17 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    try:
-        notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        notebook_config['exploratory_name'] = os.environ['exploratory_name']
+    else:
         notebook_config['exploratory_name'] = ''
-    try:
-        notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        notebook_config['computational_name'] = os.environ['computational_name']
+    else:
         notebook_config['computational_name'] = ''
     notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -83,7 +86,7 @@
             terminate_nb(notebook_config['resource_group_name'], notebook_config['notebook_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -94,6 +97,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
index ff186ac..c74abfe 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
@@ -21,23 +21,26 @@
 #
 # ******************************************************************************
 
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import sys
 import json
+import os
 
 
 if __name__ == "__main__":
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         image_conf = dict()
         image_conf['service_base_name'] = os.environ['conf_service_base_name']
         image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
         image_conf['full_image_name'] = os.environ['notebook_image_name']
 
-        image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+        image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
         if image != '':
-            AzureActions().remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+            AzureActions.remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
 
             with open("/root/result.json", 'w') as result:
                 res = {"notebook_image_name": image_conf['full_image_name'],
@@ -45,6 +48,5 @@
                        "Action": "Delete existing notebook image"}
                 result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to delete existing notebook image", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
index a31cfeb..8d90b5e 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,7 +38,7 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], slave_name)
+    slave_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'], slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON SLAVE]')
         logging.info('[INSTALLING USERs KEY ON SLAVE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
@@ -72,13 +69,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'],
-                                       data_engine['master_node_name'])
-        append_result("Failed to install user ssh key on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install user ssh key on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -92,12 +84,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to clean slave instance..", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean slave instance..", str(err))
         sys.exit(1)
 
     try:
@@ -113,12 +101,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -133,13 +117,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -157,16 +136,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring slave node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
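+    # Tear down the whole cluster on failure: slave nodes first, then the master.
+    # Relies on the module-level data_engine and AzureActions set up in __main__.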
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
+    AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -176,38 +157,41 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        try:
-            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name']
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name']
+        else:
             data_engine['computational_name'] = ''
         data_engine['service_base_name'] = os.environ['conf_service_base_name']
         data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
         data_engine['region'] = os.environ['azure_region']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['vpc_name'] = os.environ['azure_vpc_name']
-        data_engine['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
-                                                                   data_engine['project_name'])
-        data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
-                                                                    data_engine['vpc_name'],
-                                                                    data_engine['private_subnet_name']).address_prefix
-        data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(
-            data_engine['service_base_name'], data_engine['project_name'])
-        data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
-                                                                                      data_engine['project_name'])
-        data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+        data_engine['user_name'] = os.environ['edge_user_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
+        data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+                                                                      data_engine['project_name'],
+                                                                      data_engine['endpoint_name'])
+        data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+                                                                  data_engine['vpc_name'],
+                                                                  data_engine['private_subnet_name']).address_prefix
+        data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                               data_engine['project_name'],
-                                                              data_engine['exploratory_name'],
+                                                              data_engine['endpoint_name'],
                                                               data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
@@ -217,19 +201,20 @@
         data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
         data_engine['notebook_name'] = os.environ['notebook_instance_name']
-        master_node_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                           data_engine['master_node_name'])
+        master_node_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                data_engine['master_node_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'],
                                                        data_engine['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                          edge_instance_name)
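+        # Public DNS label set at creation follows host-<instance-name>.<region>.cloudapp.azure.com;
+        # it is used below whenever the network type is not private.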
+        data_engine['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                       data_engine['region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(data_engine['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = data_engine['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         key = RSA.importKey(open(keyfile_name, 'rb').read())
         data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -240,13 +225,8 @@
             initial_user = 'ec2-user'
             sudo_group = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
@@ -262,18 +242,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON MASTER]')
         logging.info('[INSTALLING USERs KEY ON MASTER]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -284,12 +260,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install ssh user key on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user key on master.", str(err))
         sys.exit(1)
 
 
@@ -304,12 +276,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to clean master instance.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to clean master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -325,12 +293,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -345,13 +309,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -369,12 +328,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -389,18 +344,15 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure slave nodes", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                                  data_engine['notebook_name'])
+        notebook_instance_ip = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                data_engine['notebook_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -425,19 +377,16 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed to set edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                        data_engine['master_node_name'])
+        ip_address = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                      data_engine['master_node_name'])
         spark_master_url = "http://" + ip_address + ":8080"
         spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
@@ -463,6 +412,7 @@
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Failed to write results", str(err))
+        clear_resources()
+        sys.exit(1)
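
Note: the repeated inline cleanup in the hunks above is funneled through a single clear_resources() helper whose definition sits outside this hunk. Reconstructed from the removed blocks (and assuming the script-level data_engine dict and the new module-level AzureActions instance), a minimal sketch would be:

def clear_resources():
    # Tear down the slave instances first, then the master, mirroring the
    # cleanup code that each exception handler previously inlined.
    for i in range(data_engine['instance_count'] - 1):
        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
        AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
    AzureActions.remove_instance(data_engine['resource_group_name'],
                                 data_engine['master_node_name'])
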
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
index 24855fa..86dc7a9 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -43,44 +44,48 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         data_engine = dict()
-        data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+        data_engine['user_name'] = os.environ['edge_user_name']
+        data_engine['project_name'] = os.environ['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name']
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
         print('Generating infrastructure names and tags')
-        try:
-            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name']
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name']
+        else:
             data_engine['computational_name'] = ''
         data_engine['service_base_name'] = os.environ['conf_service_base_name']
         data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
         data_engine['region'] = os.environ['azure_region']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['vpc_name'] = os.environ['azure_vpc_name']
-        data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
-                                                                   data_engine['project_name'])
-        data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
-                                                                    data_engine['vpc_name'],
-                                                                    data_engine['private_subnet_name']).address_prefix
-        data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(data_engine['service_base_name'],
-                                                                                        data_engine['project_name'])
-        data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
-                                                                                      data_engine['project_name'])
-        data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+        data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+                                                                      data_engine['project_name'],
+                                                                      data_engine['endpoint_name'])
+        data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+                                                                  data_engine['vpc_name'],
+                                                                  data_engine['private_subnet_name']).address_prefix
+        data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+            data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+        data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                               data_engine['project_name'],
-                                                              data_engine['exploratory_name'],
+                                                              data_engine['endpoint_name'],
                                                               data_engine['computational_name'])
         data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
         data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
         data_engine['master_network_interface_name'] = '{}-nif'.format(data_engine['master_node_name'])
         data_engine['master_size'] = os.environ['azure_dataengine_master_size']
-        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+                                                   os.environ['conf_key_name']), 'rb').read())
         data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
         data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
@@ -106,20 +111,19 @@
         data_engine['image_type'] = 'default'
 
         if os.environ['conf_shared_image_enabled'] == 'false':
-            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                         os.environ['endpoint_name'],
-                                                                                         os.environ['project_name'],
-                                                                                         os.environ['application'])
+            data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+                os.environ['application'])
         else:
             data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
-                                                                                     os.environ['endpoint_name'],
+                                                                                     data_engine['endpoint_name'],
                                                                                      os.environ['application'])
 
         data_engine['notebook_image_name'] = (lambda x: os.environ['notebook_image_name'] if x != 'None'
                     else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
 
         print('Searching pre-configured images')
-        if AzureMeta().get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
+        if AzureMeta.get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
                         os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
             data_engine['image_name'] = data_engine['notebook_image_name']
             data_engine['image_type'] = 'pre-configured'
@@ -128,26 +132,25 @@
             data_engine['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
             print('No pre-configured image found. Using default one: {}'.format(data_engine['image_name']))
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
         sys.exit(1)
 
     try:
-        edge_status = AzureMeta().get_instance_status(data_engine['resource_group_name'], '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
-                                                                                data_engine['project_name'],
-                                                                                data_engine['endpoint_name']))
+        edge_status = AzureMeta.get_instance_status(data_engine['resource_group_name'],
+                                                    '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+                                                                              data_engine['project_name'],
+                                                                              data_engine['endpoint_name']))
         if edge_status != 'running':
             logging.info('ERROR: Edge node is unavailable! Aborting...')
             print('ERROR: Edge node is unavailable! Aborting...')
-            ssn_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
-                                                              os.environ['conf_service_base_name'] + '-ssn')
-            put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                                ssn_hostname)
-            append_result("Edge node is unavailable")
+            ssn_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                            data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
             sys.exit(1)
     except Exception as err:
-        print("Failed to verify edge status.")
-        append_result("Failed to verify edge status.", str(err))
+        dlab.fab.append_result("Failed to verify edge status.", str(err))
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
@@ -182,12 +185,11 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+            AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
         except:
             print("The instance hasn't been created.")
-        append_result("Failed to create master instance.", str(err))
+        dlab.fab.append_result("Failed to create master instance.", str(err))
         sys.exit(1)
 
     try:
@@ -217,13 +219,12 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
+                AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
-        append_result("Failed to create slave instances.", str(err))
+        AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
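
Note: the prepare script now tests for optional variables with an explicit membership check instead of a bare try/except around the lookup. An equivalent, terser alternative (not what the patch uses) is os.environ.get() with a default:

import os

# Same behavior as the membership checks above: empty string when unset.
data_engine['exploratory_name'] = os.environ.get('exploratory_name', '')
data_engine['computational_name'] = os.environ.get('computational_name', '')
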
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
index cf2a613..308912f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
@@ -24,23 +24,25 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
+from fabric.api import *
 
 
 def start_data_engine(resource_group_name, cluster_name):
     print("Starting data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().start_instance(resource_group_name, vm.name)
+                    AzureActions.start_instance(resource_group_name, vm.name)
                     print("Instance {} has been started".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to start dataengine", str(err))
         sys.exit(1)
 
 
@@ -52,23 +54,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     try:
         logging.info('[STARTING DATA ENGINE]')
@@ -86,8 +91,10 @@
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['notebook_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], os.environ['notebook_instance_name'])
-        data_engine['computational_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], data_engine['computational_id'])
+        data_engine['notebook_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                      os.environ['notebook_instance_name'])
+        data_engine['computational_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+                                                                           data_engine['computational_id'])
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -96,18 +103,17 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": data_engine['service_base_name'],
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Failed to write results", str(err))
+        sys.exit(1)
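
Note: the start/stop/terminate scripts all switch from constructing AzureMeta()/AzureActions() on every call to building one instance up front and reusing it. A condensed sketch of the resulting pattern, with the two nested "Name" checks folded into a hypothetical tags.get() form:

import dlab.meta_lib
import dlab.actions_lib

AzureMeta = dlab.meta_lib.AzureMeta()          # single client, reused below
AzureActions = dlab.actions_lib.AzureActions()

def start_cluster(resource_group_name, cluster_name):
    # vm.tags can be None on untagged VMs; (vm.tags or {}) guards that case.
    for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
        if (vm.tags or {}).get("Name") == cluster_name:
            AzureActions.start_instance(resource_group_name, vm.name)
            print("Instance {} has been started".format(vm.name))
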
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
index ef1521f..963c555 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
@@ -24,23 +24,24 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
 
 
 def stop_data_engine(resource_group_name, cluster_name):
     print("Stopping data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().stop_instance(resource_group_name, vm.name)
+                    AzureActions.stop_instance(resource_group_name, vm.name)
                     print("Instance {} has been stopped".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine", str(err))
         sys.exit(1)
 
 
@@ -52,23 +53,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     try:
         logging.info('[STOPPING DATA ENGINE]')
@@ -77,7 +81,7 @@
             stop_data_engine(data_engine['resource_group_name'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to stop Data Engine.", str(err))
+            dlab.fab.append_result("Failed to stop Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -88,6 +92,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Failed to write results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
index 974fc3e..1363eb8 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
@@ -24,30 +24,31 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
 
 
 def terminate_data_engine(resource_group_name, notebook_name, os_user, key_path, cluster_name):
     print("Terminating data engine cluster")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if "Name" in vm.tags:
                 if cluster_name == vm.tags["Name"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        AzureActions().remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
+        AzureActions.remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
         sys.exit(1)
 
 
@@ -59,23 +60,26 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name']
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name']
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name']
+    data_engine['endpoint_name'] = os.environ['endpoint_name']
+    data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
                                                           data_engine['project_name'],
-                                                          data_engine['exploratory_name'],
+                                                          data_engine['endpoint_name'],
                                                           data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
     data_engine['key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
@@ -89,7 +93,7 @@
                                   os.environ['conf_os_user'], data_engine['key_path'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -100,6 +104,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Failed to write results", str(err))
+        sys.exit(1)
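
Note: all four dataengine scripts now end with the same result-writing epilogue, and a write failure exits with code 1 instead of the old, misleading sys.exit(0). Extracted into a hypothetical helper (not part of the patch), the shared shape is:

import json
import sys
import dlab.fab

def write_result(res, path='/root/result.json'):
    # Persist the action summary; report and abort if the write fails.
    try:
        with open(path, 'w') as result:
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        dlab.fab.append_result("Failed to write results", str(err))
        sys.exit(1)
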
diff --git a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
index 3a64bf6..37473b7 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,31 +41,34 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
                                              "SBN": notebook_config['service_base_name'],
@@ -85,7 +90,8 @@
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,45 +100,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -140,9 +146,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -151,16 +156,16 @@
         print('[CONFIGURE PROXY ON DEEP LEARNING  INSTANCE]')
         additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -173,12 +178,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -194,9 +198,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -206,7 +209,7 @@
                  "--os_user {2} --jupyter_version {3} " \
                  "--scala_version {4} --spark_version {5} " \
                  "--hadoop_version {6} --region {7} " \
-                 "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+                 "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
                  .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
                          os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
                          os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -214,15 +217,14 @@
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_deep_learning_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -233,12 +235,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed to setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -254,44 +255,44 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post-configure instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -316,17 +317,16 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed to set edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         tensorboard_ip_url = 'http://' + ip_address + ':6006'
         jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
@@ -373,7 +373,6 @@
                ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
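
Note: one caveat in the image-recreation path above is that the while loop polls get_instance_status() with no delay or deadline, so it spins on the metadata API until the VM comes up. A bounded variant (a suggestion, not part of this patch, reusing the module-level AzureMeta instance) would be:

import time

def wait_until_running(resource_group_name, instance_name, timeout=900, interval=10):
    # Poll the instance state with a pause between checks and a hard deadline.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if AzureMeta.get_instance_status(resource_group_name, instance_name) == 'running':
            return True
        time.sleep(interval)
    raise Exception("{} didn't reach 'running' within {}s".format(instance_name, timeout))
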
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
index fd2f940..0949946 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
@@ -22,10 +22,16 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -34,112 +40,121 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
+
+    def clear_resources():
+        AzureActions.remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
+                                   edge_conf['private_subnet_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['master_security_group_name'])
+        AzureActions.remove_security_group(edge_conf['resource_group_name'],
+                                           edge_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
+            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
+                AzureActions.remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
+        if os.environ['azure_datalake_enable'] == 'true':
+            for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
+                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
+                    AzureActions.remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+
     try:
         print('Generating infrastructure names and tags')
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         edge_conf = dict()
-
-        edge_conf['service_base_name'] = os.environ['conf_service_base_name']
+        edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
         edge_conf['key_name'] = os.environ['conf_key_name']
         edge_conf['vpc_name'] = os.environ['azure_vpc_name']
         edge_conf['region'] = os.environ['azure_region']
         edge_conf['subnet_name'] = os.environ['azure_subnet_name']
-        edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        edge_conf['user_keyname'] = os.environ['project_name']
-        edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-subnet'
+        edge_conf['project_name'] = os.environ['project_name']
+        edge_conf['endpoint_name'] = os.environ['endpoint_name']
+        edge_conf['user_keyname'] = edge_conf['project_name']
+        edge_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(edge_conf['service_base_name'],
+                                                                    edge_conf['project_name'],
+                                                                    edge_conf['endpoint_name'])
         edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                                edge_conf['project_name'], edge_conf['endpoint_name'])
-        edge_conf['network_interface_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
-                                              '-edge-nif'
-        edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
-                                             '-edge-ip'
-        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
-        edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + edge_conf['region'] + \
-                                         '.cloudapp.azure.com'
-        edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
-                                                                              edge_conf['project_name'],
-                                                                              edge_conf['endpoint_name'])
-        edge_conf['user_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-' + edge_conf['endpoint_name'] +
-                                            '-container').lower()
-        edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(edge_conf['service_base_name'],
-                                                                                   edge_conf['endpoint_name'])
-        edge_conf['shared_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['endpoint_name'] + '-shared-container').lower()
-        edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake'
-        edge_conf['datalake_shared_directory_name'] = edge_conf['service_base_name'] + '-shared-folder'
-        edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'],
-                                                                            edge_conf['project_name'])
-        edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
-        edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + "-" + os.environ['endpoint_name'] +\
-                                                    '-nb-sg'
-        edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                    + edge_conf['project_name'] + '-dataengine-master-sg'
-        edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                   + edge_conf['project_name'] + '-dataengine-slave-sg'
+        edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+                                                                                edge_conf['region'])
+        edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                                             edge_conf['project_name'],
+                                                                             edge_conf['endpoint_name'])
+        edge_conf['user_container_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                                       edge_conf['project_name'],
+                                                                       edge_conf['endpoint_name'])
+        edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                                  edge_conf['endpoint_name'])
+        edge_conf['shared_container_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['endpoint_name'])
+        edge_conf['datalake_store_name'] = '{}-ssn-datalake'.format(edge_conf['service_base_name'])
+        edge_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(edge_conf['service_base_name'])
+        edge_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(edge_conf['service_base_name'],
+                                                                                edge_conf['project_name'],
+                                                                                edge_conf['endpoint_name'])
+        edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
+        edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                            edge_conf['project_name'],
+                                                                            edge_conf['endpoint_name'])
+        edge_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(edge_conf['service_base_name'],
+                                                                                 edge_conf['project_name'],
+                                                                                 edge_conf['endpoint_name'])
+        edge_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(edge_conf['service_base_name'],
+                                                                               edge_conf['project_name'],
+                                                                               edge_conf['endpoint_name'])
         edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-        keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
-        edge_conf['private_subnet_cidr'] = AzureMeta().get_subnet(edge_conf['resource_group_name'],
-                                                                  edge_conf['vpc_name'],
-                                                                  edge_conf['private_subnet_name']).address_prefix
+        edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+        edge_conf['private_subnet_cidr'] = AzureMeta.get_subnet(edge_conf['resource_group_name'],
+                                                                edge_conf['vpc_name'],
+                                                                edge_conf['private_subnet_name']).address_prefix
         if os.environ['conf_network_type'] == 'private':
-            edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                              edge_conf['instance_name'])
-            edge_conf['edge_public_ip'] =  edge_conf['edge_private_ip']
+            edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                                            edge_conf['instance_name'])
+            edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+            edge_conf['instance_hostname'] = edge_conf['edge_private_ip']
         else:
-            edge_conf['edge_public_ip'] = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                                                     edge_conf['instance_name'])
-            edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                              edge_conf['instance_name'])
-        instance_hostname = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                               edge_conf['instance_name'])
-        edge_conf['vpc_cidrs'] = AzureMeta().get_vpc(edge_conf['resource_group_name'],
-                                                     edge_conf['vpc_name']).address_space.address_prefixes
+            edge_conf['edge_public_ip'] = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                                                   edge_conf['instance_name'])
+            edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                                            edge_conf['instance_name'])
+            edge_conf['instance_hostname'] = edge_conf['instance_dns_name']
+        edge_conf['vpc_cidrs'] = AzureMeta.get_vpc(edge_conf['resource_group_name'],
+                                                   edge_conf['vpc_name']).address_space.address_prefixes
 
         if os.environ['conf_stepcerts_enabled'] == 'true':
-            step_cert_sans = ' --san {0} --san {1} '.format(AzureMeta().get_private_ip_address(
-                edge_conf['resource_group_name'], edge_conf['instance_name']), edge_conf['instance_dns_name'])
+            edge_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+                edge_conf['resource_group_name'], edge_conf['instance_name']))
             if os.environ['conf_network_type'] == 'public':
-                step_cert_sans += ' --san {0}'.format(
-                    AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                               edge_conf['instance_name']))
+                edge_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+                    AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                             edge_conf['instance_name']),
+                    edge_conf['instance_dns_name'])
         else:
-            step_cert_sans = ''
+            edge_conf['step_cert_sans'] = ''
 
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate infrastructure names", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -147,57 +162,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
-        params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['azure_region'])
+        params = "--hostname {} --keyfile {} --user {} --region {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+            os.environ['azure_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -205,40 +187,24 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
                              "ldap_password": os.environ['ldap_service_password'],
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": ['0.0.0.0/0']}
-        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
@@ -248,43 +214,27 @@
         additional_config = {"user_keyname": edge_conf['user_keyname'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key. Excpeption: " + str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing users key. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        keycloak_client_secret = str(uuid.uuid4())
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
         params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
-                 "--step_cert_sans '{}'" \
-            .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'],
-                    edge_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret,
-                    step_cert_sans)
+                 "--step_cert_sans '{}'".format(
+                  edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+                  edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-' + edge_conf['endpoint_name'],
+                  edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
@@ -293,44 +243,28 @@
             raise Exception
         keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
                           "--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
-                          "--edge_public_ip {} --project_name {} --endpoint_name {} " \
-            .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
-                    os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
-                    os.environ['keycloak_user_password'],
-                    keycloak_client_secret, edge_conf['edge_public_ip'], os.environ['project_name'], os.environ['endpoint_name'])
+                          "--edge_public_ip {} --project_name {} --endpoint_name {} ".format(
+                           edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+                           os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+                           os.environ['keycloak_user_password'],
+                           edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                           edge_conf['endpoint_name'])
         try:
             local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing Nginx reverse proxy. Excpeption: " + str(err))
-        AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
-        AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
-                                     edge_conf['private_subnet_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                             edge_conf['master_security_group_name'])
-        AzureActions().remove_security_group(edge_conf['resource_group_name'],
-                                                 edge_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
-            if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
-        if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
-                if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing Nginx reverse proxy. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
+        for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
             if edge_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                shared_storage_account_name = storage_account.name
+                edge_conf['shared_storage_account_name'] = storage_account.name
             if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
-                user_storage_account_name = storage_account.name
+                edge_conf['user_storage_account_name'] = storage_account.name
 
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
@@ -339,13 +273,13 @@
         print("Public IP: {}".format(edge_conf['edge_public_ip']))
         print("Private IP: {}".format(edge_conf['edge_private_ip']))
         print("Key name: {}".format(edge_conf['key_name']))
-        print("User storage account name: {}".format(user_storage_account_name))
+        print("User storage account name: {}".format(edge_conf['user_storage_account_name']))
         print("User container name: {}".format(edge_conf['user_container_name']))
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
                 if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    datalake_id = datalake.name
-            print("Data Lake name: {}".format(datalake_id))
+                    edge_conf['datalake_id'] = datalake.name
+            print("Data Lake name: {}".format(edge_conf['datalake_id']))
             print("Data Lake tag name: {}".format(edge_conf['datalake_store_name']))
             print("Data Lake Store user directory name: {}".format(edge_conf['datalake_user_directory_name']))
         print("Notebook SG: {}".format(edge_conf['notebook_security_group_name']))
@@ -357,9 +291,9 @@
                        "public_ip": edge_conf['edge_public_ip'],
                        "ip": edge_conf['edge_private_ip'],
                        "key_name": edge_conf['key_name'],
-                       "user_storage_account_name": user_storage_account_name,
+                       "user_storage_account_name": edge_conf['user_storage_account_name'],
                        "user_container_name": edge_conf['user_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
+                       "shared_storage_account_name": edge_conf['shared_storage_account_name'],
                        "shared_container_name": edge_conf['shared_container_name'],
                        "user_storage_account_tag_name": edge_conf['user_storage_account_name'],
                        "tunnel_port": "22",
@@ -369,7 +303,7 @@
                        "notebook_subnet": edge_conf['private_subnet_cidr'],
                        "instance_id": edge_conf['instance_name'],
                        "full_edge_conf": edge_conf,
-                       "project_name": os.environ['project_name'],
+                       "project_name": edge_conf['project_name'],
                        "@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
                        "Action": "Create new EDGE server"}
             else:
@@ -377,12 +311,12 @@
                        "public_ip": edge_conf['edge_public_ip'],
                        "ip": edge_conf['edge_private_ip'],
                        "key_name": edge_conf['key_name'],
-                       "user_storage_account_name": user_storage_account_name,
+                       "user_storage_account_name": edge_conf['user_storage_account_name'],
                        "user_container_name": edge_conf['user_container_name'],
-                       "shared_storage_account_name": shared_storage_account_name,
+                       "shared_storage_account_name": edge_conf['shared_storage_account_name'],
                        "shared_container_name": edge_conf['shared_container_name'],
                        "user_storage_account_tag_name": edge_conf['user_storage_account_name'],
-                       "datalake_name": datalake_id,
+                       "datalake_name": edge_conf['datalake_id'],
                        "datalake_tag_name": edge_conf['datalake_store_name'],
                        "datalake_shared_directory_name": edge_conf['datalake_shared_directory_name'],
                        "datalake_user_directory_name": edge_conf['datalake_user_directory_name'],
@@ -393,11 +327,12 @@
                        "notebook_subnet": edge_conf['private_subnet_cidr'],
                        "instance_id": edge_conf['instance_name'],
                        "full_edge_conf": edge_conf,
-                       "project_name": os.environ['project_name'],
+                       "project_name": edge_conf['project_name'],
                        "@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
                        "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
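
The dominant refactor in edge_configure.py collapses six near-identical cleanup blocks,
one per except handler, into a single clear_resources() helper that every failure path
calls before exiting. A condensed sketch of that shape, with stubbed actions standing in
for the AzureActions methods shown above (resource names are illustrative):

    import sys

    def remove_instance(resource_group_name, instance_name):
        # Stub for AzureActions.remove_instance; the real call tears down the VM.
        print('Removing {} from {}'.format(instance_name, resource_group_name))

    def clear_resources(conf):
        # One place to undo everything the script may have created so far; the
        # subnet, security groups, storage accounts and datalake directory all
        # follow the same pattern as the instance removal below.
        remove_instance(conf['resource_group_name'], conf['instance_name'])

    def run_step(conf, failure_message, action):
        # Every provisioning step shares one failure policy: report, clean up, exit 1.
        try:
            action()
        except Exception as err:
            print('{} {}'.format(failure_message, err))  # stands in for dlab.fab.append_result
            clear_resources(conf)
            sys.exit(1)

    conf = {'resource_group_name': 'dlab-rg', 'instance_name': 'dlab-project-endpoint-edge'}
    run_step(conf, "Failed creating ssh user 'dlab'.", lambda: None)
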
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
index d4bda85..9dc1b01 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
@@ -59,14 +59,14 @@
         edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
         edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
         edge_conf['instance_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge'
-        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
+        edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-volume-primary'
         edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
         edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + "-" + os.environ['endpoint_name']\
             + '-nb-sg'
         edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                    + edge_conf['user_name'] + '-dataengine-master-sg'
+                                                    + edge_conf['user_name'] + '-de-master-sg'
         edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
-                                                   + edge_conf['user_name'] + '-dataengine-slave-sg'
+                                                   + edge_conf['user_name'] + '-de-slave-sg'
         edge_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
                                                                                edge_conf['user_name'],
                                                                                edge_conf['endpoint_name']))
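
edge_prepare.py tightens the resource-naming scheme: the primary disk becomes
...-volume-primary and the dataengine security groups are shortened to ...-de-master-sg
and ...-de-slave-sg. A tiny sketch of building those names from the same components
(the values below are illustrative):

    service_base_name = 'dlab'
    user_name = 'alice'
    instance_name = '{}-{}-edge'.format(service_base_name, user_name)
    names = {
        'primary_disk_name': instance_name + '-volume-primary',
        'master_security_group_name': '{}-{}-de-master-sg'.format(service_base_name, user_name),
        'slave_security_group_name': '{}-{}-de-slave-sg'.format(service_base_name, user_name),
    }
    print(names)
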
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
index 445f48d..04e57ae 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,29 +39,31 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + os.environ['azure_region'] + '.cloudapp.azure.com'
+    edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+                                                                            os.environ['azure_region'])
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        AzureActions().start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        public_ip_address = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
-                                                                       edge_conf['instance_name'])
-        private_ip_address = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
-                                                                         edge_conf['instance_name'])
+        public_ip_address = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+                                                                     edge_conf['instance_name'])
+        private_ip_address = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+                                                              edge_conf['instance_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
@@ -72,7 +78,7 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
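
edge_start.py also adopts the new failure convention for the result file: a write error now
reaches dlab.fab.append_result and exits non-zero, where the old print + sys.exit(0) reported
success to the orchestrator even when result.json was never written. A minimal sketch of the
corrected pattern (the path and payload are illustrative):

    import json
    import sys

    res = {'instance_name': 'dlab-project-endpoint-edge',
           'Action': 'Start up notebook server'}
    try:
        with open('/tmp/result.json', 'w') as result:
            result.write(json.dumps(res))
    except Exception as err:
        # Exiting 1 (instead of the old 0) lets the orchestrator see the failure.
        print('Failed to write results. {}'.format(err))
        sys.exit(1)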
 
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
index 8c16d12..1b3fd15 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
@@ -23,14 +23,20 @@
 
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
+import traceback
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
@@ -44,13 +50,12 @@
         logging.info('[COLLECT DATA]')
         print('[COLLECTING DATA]')
         params = '--resource_group_name {} --list_resources "{}"'.format(edge_conf['resource_group_name'],
-                                                                      os.environ['edge_list_resources'])
+                                                                         os.environ['edge_list_resources'])
         try:
             local("~/scripts/{}.py {}".format('common_collect_data', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to collect necessary information.", str(err))
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to collect necessary information.", str(err))
+        sys.exit(1)
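
edge_status.py keeps the project's pattern of shelling out to helper scripts with a flag
string built by str.format and executed through fabric's local(). A sketch of the same call
shape using the standard library instead of fabric (script path and flag values are
illustrative; echo keeps the sketch runnable without the real helper script on disk):

    import subprocess

    params = '--resource_group_name {} --list_resources "{}"'.format('dlab-rg',
                                                                     'edge_list_resources')
    # fabric.api.local(...) runs the command through a shell; subprocess with
    # shell=True is the closest stdlib equivalent.
    subprocess.call('echo ~/scripts/common_collect_data.py {}'.format(params), shell=True)
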
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
index 1bb319b..dfc4cba 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
@@ -20,10 +20,13 @@
 # under the License.
 #
 # ******************************************************************************
-
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,21 +38,22 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        AzureActions().stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+        AzureActions.stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -58,7 +62,7 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
 
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
index d785f23..a61c75d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
@@ -22,128 +22,137 @@
 # ******************************************************************************
 
 import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 
 
 def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
     print("Terminating EDGE, notebook and dataengine virtual machines")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             try:
                 if project_tag == vm.tags["project_tag"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             try:
                 if project_tag == network_interface.tags["project_tag"]:
-                    AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                    AzureActions.delete_network_if(resource_group_name, network_interface.name)
                     print("Network interface {} has been removed".format(network_interface.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             try:
                 if project_tag in static_public_ip.tags["project_tag"]:
-                    AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                    AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                     print("Static public IP {} has been removed".format(static_public_ip.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             try:
                 if project_tag in disk.tags["project_tag"]:
-                    AzureActions().remove_disk(resource_group_name, disk.name)
+                    AzureActions.remove_disk(resource_group_name, disk.name)
                     print("Disk {} has been removed".format(disk.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove disks", str(err))
         sys.exit(1)
 
     print("Removing storage account")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             try:
                 if project_tag == storage_account.tags["project_tag"]:
-                    AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                    AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                     print("Storage account {} has been terminated".format(storage_account.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts", str(err))
         sys.exit(1)
 
     print("Deleting Data Lake Store directory")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             try:
                 if service_base_name == datalake.tags["SBN"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+                    AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
                     print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             try:
                 if project_tag == sg.tags["project_tag"]:
-                    AzureActions().remove_security_group(resource_group_name, sg.name)
+                    AzureActions.remove_security_group(resource_group_name, sg.name)
                     print("Security group {} has been terminated".format(sg.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+        AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
         print("Private subnet {} has been terminated".format(subnet_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnet", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     edge_conf = dict()
     edge_conf['service_base_name'] = os.environ['conf_service_base_name']
     edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    edge_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-    edge_conf['project_name'] = os.environ['project_name'].replace('_', '-')
-    edge_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
-    edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + '-subnet'
+    edge_conf['user_name'] = os.environ['edge_user_name']
+    edge_conf['project_name'] = os.environ['project_name']
+    edge_conf['project_tag'] = edge_conf['project_name']
+    edge_conf['endpoint_name'] = os.environ['endpoint_name']
+    edge_conf['private_subnet_name'] = "{}-{}-{}-subnet".format(edge_conf['service_base_name'],
+                                                                edge_conf['project_name'], edge_conf['endpoint_name'])
     edge_conf['vpc_name'] = os.environ['azure_vpc_name']
 
 
@@ -153,10 +162,11 @@
         try:
             terminate_edge_node(edge_conf['resource_group_name'], edge_conf['service_base_name'],
                                 edge_conf['project_tag'], edge_conf['private_subnet_name'], edge_conf['vpc_name'])
-        except Exception as err:
+        except:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
-    except:
+            raise Exception
+    except Exception as err:
+        dlab.fab.append_result("Failed to terminate edge.", str(err))
         sys.exit(1)
 
     try:
@@ -166,6 +176,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
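
terminate_edge_node() applies one sweep shape to every resource type: list everything in the
resource group, match the project_tag, remove matches, and tolerate resources that carry no
tag at all. A self-contained sketch of that loop over stand-in data (the dicts below are
illustrative; the real code iterates Azure SDK objects):

    vms = [
        {'name': 'dlab-proj-edge', 'tags': {'project_tag': 'proj'}},
        {'name': 'unrelated-vm', 'tags': {}},  # no project_tag: skipped, not fatal
    ]
    project_tag = 'proj'
    for vm in vms:
        try:
            if project_tag == vm['tags']['project_tag']:
                # Stands in for AzureActions.remove_instance(resource_group_name, vm.name)
                print('Instance {} has been terminated'.format(vm['name']))
        except KeyError:
            # The script's bare `except: pass` tolerates untagged resources;
            # catching KeyError keeps that intent explicit in this sketch.
            pass
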
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
index 8d9ac96..f5f9168 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,53 +40,57 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['project_name'],
-            os.environ['application'])
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "project_tag": notebook_config['project_tag'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "project_tag": notebook_config['project_tag'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         else:
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -93,45 +99,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -139,9 +145,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -158,9 +163,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -176,9 +180,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
-    # installing and configuring jupiter and all dependencies
+    # installing and configuring jupyter and all dependencies
@@ -189,7 +192,7 @@
                  "--region {2} --spark_version {3} " \
                  "--hadoop_version {4} --os_user {5} " \
                  "--scala_version {6} --r_mirror {7} " \
-                 "--ip_adress {8} --exploratory_name {9} --edge_ip {10}".\
+                 "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
             format(instance_hostname, keyfile_name,
                    os.environ['azure_region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
@@ -197,15 +200,14 @@
                    notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -218,12 +220,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -235,12 +236,11 @@
             # local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -256,44 +256,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image from notebook.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -318,18 +319,17 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
         jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -368,6 +368,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to generate output information", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
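
Editor's note: the recurring mechanical change in jupyter_configure.py above is import hygiene — wildcard imports (`from dlab.meta_lib import *`) become module imports, and per-call construction (`AzureMeta().get_private_ip_address(...)`) becomes a single instance built once inside the main try-block and reused. A hedged sketch of the difference, with a dummy class standing in for dlab.meta_lib.AzureMeta (assumed to do its Azure SDK client setup in __init__):

class AzureMetaSketch(object):
    """Dummy stand-in for dlab.meta_lib.AzureMeta."""

    def __init__(self):
        # The real class builds Azure SDK clients from credentials here,
        # which is wasted work when repeated for every lookup.
        print('building Azure SDK clients...')

    def get_private_ip_address(self, resource_group_name, instance_name):
        return '10.0.0.4'  # placeholder value for the sketch


# Before: a fresh client stack for every call site.
ip = AzureMetaSketch().get_private_ip_address('rg', 'nb-instance')
edge_ip = AzureMetaSketch().get_private_ip_address('rg', 'edge-instance')

# After: one shared instance, mirroring the new
# `AzureMeta = dlab.meta_lib.AzureMeta()` at the top of the script.
AzureMeta = AzureMetaSketch()
ip = AzureMeta.get_private_ip_address('rg', 'nb-instance')
edge_ip = AzureMeta.get_private_ip_address('rg', 'edge-instance')
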
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
index eaf8b75..1984360 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
+import traceback
 
 
 if __name__ == "__main__":
@@ -38,54 +40,57 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'],
-                                                                os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
-            notebook_config['service_base_name'],
-            notebook_config['endpoint_name'],
-            notebook_config['project_name'],
-            os.environ['application'])
+                notebook_config['service_base_name'],
+                notebook_config['project_name'],
+                notebook_config['endpoint_name'],
+                os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "project_tag": notebook_config['project_tag'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "project_tag": notebook_config['project_tag'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         else:
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
                 notebook_config['service_base_name'],
                 notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
-                                       "SBN": notebook_config['service_base_name'],
-                                       "User": notebook_config['user_name'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "Exploratory": notebook_config['exploratory_name'],
-                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                             "SBN": notebook_config['service_base_name'],
+                                             "User": notebook_config['user_name'],
+                                             "endpoint_tag": notebook_config['endpoint_tag'],
+                                             "Exploratory": notebook_config['exploratory_name'],
+                                             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
-        notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'])
+        notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,26 +99,27 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
             initial_user = 'ubuntu'
@@ -122,9 +128,8 @@
             initial_user = 'ec2-user'
             sudo_group = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -140,9 +145,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -159,9 +163,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -177,9 +180,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
-    # installing and configuring jupiter and all dependencies
+    # installing and configuring jupyter and all dependencies
@@ -190,7 +192,7 @@
                  "--region {2} --spark_version {3} " \
                  "--hadoop_version {4} --os_user {5} " \
                  "--scala_version {6} --r_mirror {7} " \
-                 "--ip_adress {8} --exploratory_name {9} --edge_ip {10}".\
+                 "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
             format(instance_hostname, keyfile_name,
                    os.environ['azure_region'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
@@ -198,15 +200,14 @@
                    notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_jupyterlab_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -219,12 +219,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -236,23 +235,22 @@
             # local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
-
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
@@ -261,23 +259,23 @@
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image from notebook.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image from notebook.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -302,12 +300,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -325,8 +322,7 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy for docker.", str(err))
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
@@ -346,15 +342,14 @@
              traceback.print_exc()
              raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start Jupyter container.", str(err))
+        dlab.fab.append_result("Failed to start Jupyter container.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
         jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
@@ -393,6 +388,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        append_result("Failed to generate output information", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
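
Editor's note: the renames in jupyterlab_configure.py above and project_prepare.py below converge on one naming scheme, <service_base_name>-<project>-<endpoint>-<suffix>, for subnets, security groups, disks and buckets, with .lower() where the target service (Azure storage) requires it. A hypothetical helper showing the convention; the actual scripts inline the format() calls:

def resource_name(service_base_name, project_name, endpoint_name, suffix,
                  lowercase=False):
    # Hypothetical helper, not part of the patch; mirrors the inline
    # '{}-{}-{}-{}'.format(...) pattern used throughout.
    name = '{}-{}-{}-{}'.format(service_base_name, project_name,
                                endpoint_name, suffix)
    # Azure storage account and container names must be lowercase,
    # hence the .lower() on the bucket names in the hunks above.
    return name.lower() if lowercase else name


print(resource_name('dlab', 'project1', 'endpoint1', 'nb-sg'))
# dlab-project1-endpoint1-nb-sg
print(resource_name('dlab', 'project1', 'endpoint1', 'bucket', lowercase=True))
# dlab-project1-endpoint1-bucket
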
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
index f5c98ab..e2d481d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
@@ -22,16 +22,22 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os, re
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import re
 import traceback
 from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
@@ -39,12 +45,15 @@
 
     try:
         print('Generating infrastructure names and tags')
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         project_conf = dict()
-        project_conf['service_base_name'] = os.environ['conf_service_base_name']
-        project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-        project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-        project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-        project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+        project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
+        project_conf['project_name'] = (os.environ['project_name'])
+        project_conf['project_tag'] = project_conf['project_name']
+        project_conf['endpoint_name'] = (os.environ['endpoint_name'])
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
         project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
 
         project_conf['azure_ad_user_name'] = os.environ['azure_iam_user']
@@ -52,11 +61,15 @@
         project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
         project_conf['vpc_name'] = os.environ['azure_vpc_name']
         project_conf['subnet_name'] = os.environ['azure_subnet_name']
-        project_conf['private_subnet_name'] = project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-subnet'
+        project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+                                                                       project_conf['project_name'],
+                                                                       project_conf['endpoint_name'])
         if os.environ['conf_network_type'] == 'private':
             project_conf['static_public_ip_name'] = 'None'
         else:
-            project_conf['static_public_ip_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-edge-ip'
+            project_conf['static_public_ip_name'] = '{}-{}-{}-edge-static-ip'.format(project_conf['service_base_name'],
+                                                                                     project_conf['project_name'],
+                                                                                     project_conf['endpoint_name'])
         project_conf['region'] = os.environ['azure_region']
         project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
         project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
@@ -65,90 +78,99 @@
                                                                   project_conf['project_name'],
                                                                   project_conf['endpoint_tag'])
         project_conf['network_interface_name'] = '{0}-nif'.format(project_conf['instance_name'])
-        project_conf['primary_disk_name'] = project_conf['instance_name'] + '-disk0'
+        project_conf['primary_disk_name'] = project_conf['instance_name'] + '-volume-0'
         project_conf['edge_security_group_name'] = project_conf['instance_name'] + '-sg'
-        project_conf['notebook_security_group_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + "-" + os.environ['endpoint_name']\
-            + '-nb-sg'
-        project_conf['master_security_group_name'] = project_conf['service_base_name'] + '-' \
-                                                    + project_conf['project_name'] + '-dataengine-master-sg'
-        project_conf['slave_security_group_name'] = project_conf['service_base_name'] + '-' \
-                                                   + project_conf['project_name'] + '-dataengine-slave-sg'
-        project_conf['edge_storage_account_name'] = '{0}-{1}-{2}-storage'.format(project_conf['service_base_name'],
+        project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+                                                                               project_conf['project_name'],
+                                                                               project_conf['endpoint_name'])
+        project_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(project_conf['service_base_name'],
+                                                                                    project_conf['project_name'],
+                                                                                    project_conf['endpoint_name'])
+        project_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(project_conf['service_base_name'],
+                                                                                  project_conf['project_name'],
+                                                                                  project_conf['endpoint_name'])
+        project_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
                                                                                  project_conf['project_name'],
-                                                                                 project_conf['endpoint_name'])
-        project_conf['edge_container_name'] = (project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-' + project_conf['endpoint_name'] +
-                                            '-container').lower()
-        project_conf['datalake_store_name'] = project_conf['service_base_name'] + '-ssn-datalake'
-        project_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(project_conf['service_base_name'],
-                                                                            project_conf['project_name'])
+                                                                                 project_conf['endpoint_name'])).lower()
+        project_conf['edge_container_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                           project_conf['project_name'],
+                                                                           project_conf['endpoint_name'])).lower()
+        project_conf['datalake_store_name'] = '{}-ssn-datalake'.format(project_conf['service_base_name'])
+        project_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(project_conf['service_base_name'],
+                                                                                   project_conf['project_name'],
+                                                                                   project_conf['endpoint_name'])
         ssh_key_path = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
         key = RSA.importKey(open(ssh_key_path, 'rb').read())
         project_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
         project_conf['instance_storage_account_type'] = 'Premium_LRS'
         project_conf['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
         project_conf['instance_tags'] = {"Name": project_conf['instance_name'],
-                                        "SBN": project_conf['service_base_name'],
-                                        "project_tag": project_conf['project_tag'],
-                                        "endpoint_tag": project_conf['endpoint_tag'],
-                                        os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+                                         "SBN": project_conf['service_base_name'],
+                                         "project_tag": project_conf['project_tag'],
+                                         "endpoint_tag": project_conf['endpoint_tag'],
+                                         os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         project_conf['storage_account_tags'] = {"Name": project_conf['edge_storage_account_name'],
                                                 "SBN": project_conf['service_base_name'],
                                                 "project_tag": project_conf['project_tag'],
                                                 "endpoint_tag": project_conf['endpoint_tag'],
-                                                os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
+                                                os.environ['conf_billing_tag_key']:
+                                                    os.environ['conf_billing_tag_value'],
                                                 project_conf['tag_name']: project_conf['edge_storage_account_name']}
         project_conf['primary_disk_size'] = '32'
-        project_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(project_conf['service_base_name'],
-                                                                                  project_conf['endpoint_name'])
-        project_conf['shared_container_name'] = '{}-{}-shared-container'.format(project_conf['service_base_name'], project_conf['endpoint_name']).lower()
+        project_conf['shared_storage_account_name'] = ('{0}-{1}-shared-bucket'.format(
+            project_conf['service_base_name'], project_conf['endpoint_name'])).lower()
+        project_conf['shared_container_name'] = ('{}-{}-shared-bucket'.format(project_conf['service_base_name'],
+                                                                              project_conf['endpoint_name'])).lower()
         project_conf['shared_storage_account_tags'] = {"Name": project_conf['shared_storage_account_name'],
-                                                   "SBN": project_conf['service_base_name'],
-                                                   os.environ['conf_billing_tag_key']: os.environ[
-                                                       'conf_billing_tag_value'], "endpoint_tag": project_conf['endpoint_tag'],
-                                                       project_conf['tag_name']: project_conf['shared_storage_account_name']}
+                                                       "SBN": project_conf['service_base_name'],
+                                                       os.environ['conf_billing_tag_key']:
+                                                           os.environ['conf_billing_tag_value'],
+                                                       "endpoint_tag": project_conf['endpoint_tag'],
+                                                       project_conf['tag_name']:
+                                                           project_conf['shared_storage_account_name']}
 
         # FUSE in case of absence of user's key
         try:
             project_conf['user_key'] = os.environ['key']
             try:
                 local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
-                                                        os.environ['project_name']))
+                                                        project_conf['project_name']))
             except:
                 print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
         except KeyError:
             print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
             sys.exit(1)
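+        # A missing 'key' environment variable (KeyError) is fatal, while a failure to append
+        # the admin key to the .pub file is only reported and provisioning continues.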
 
-        print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(
+            project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
         logging.info(json.dumps(project_conf))
     except Exception as err:
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE SUBNET]')
         print('[CREATE SUBNET]')
         params = "--resource_group_name {} --vpc_name {} --region {} --vpc_cidr {} --subnet_name {} --prefix {}".\
-            format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'], project_conf['vpc_cidr'],
-                   project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
+            format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'],
+                   project_conf['vpc_cidr'], project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
         except:
             print("Subnet hasn't been created.")
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
-    project_conf['private_subnet_cidr'] = AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                                              project_conf['private_subnet_name']).address_prefix
+    project_conf['private_subnet_cidr'] = AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                               project_conf['vpc_name'],
+                                                               project_conf['private_subnet_name']).address_prefix
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
 
     try:
@@ -421,20 +443,20 @@
             }
         ]
         params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
-            format(project_conf['resource_group_name'], project_conf['edge_security_group_name'], project_conf['region'],
-                   json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
+            format(project_conf['resource_group_name'], project_conf['edge_security_group_name'],
+                   project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except Exception as err:
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
             try:
-                AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                     project_conf['edge_security_group_name'])
+                AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                                   project_conf['edge_security_group_name'])
             except:
                 print("Edge Security group hasn't been created.")
             traceback.print_exc()
-            append_result("Failed creating security group for edge node.", str(err))
+            dlab.fab.append_result("Failed creating security group for edge node.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -459,7 +481,8 @@
                 "protocol": "*",
                 "source_port_range": "*",
                 "destination_port_range": "*",
-                "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                              project_conf['vpc_name'],
                                                               project_conf['subnet_name']).address_prefix,
                 "destination_address_prefix": "*",
                 "access": "Allow",
@@ -494,8 +517,9 @@
                 "source_port_range": "*",
                 "destination_port_range": "*",
                 "source_address_prefix": "*",
-                "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                                              project_conf['subnet_name']).address_prefix,
+                "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                                   project_conf['vpc_name'],
+                                                                   project_conf['subnet_name']).address_prefix,
                 "access": "Allow",
                 "priority": 110,
                 "direction": "Outbound"
@@ -524,21 +548,22 @@
             }
             ]
         params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
-            format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'], project_conf['region'],
-                   json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
+            format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'],
+                   project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
         try:
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
+        dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['notebook_security_group_name'])
         except:
             print("Notebook Security group hasn't been created.")
         sys.exit(1)
@@ -563,9 +588,9 @@
                 "protocol": "*",
                 "source_port_range": "*",
                 "destination_port_range": "*",
-                "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
-                                                                project_conf['vpc_name'],
-                                                                project_conf['subnet_name']).address_prefix,
+                "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                              project_conf['vpc_name'],
+                                                              project_conf['subnet_name']).address_prefix,
                 "destination_address_prefix": "*",
                 "access": "Allow",
                 "priority": 110,
@@ -599,9 +624,9 @@
                 "source_port_range": "*",
                 "destination_port_range": "*",
                 "source_address_prefix": "*",
-                "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
-                                                                     project_conf['vpc_name'],
-                                                                     project_conf['subnet_name']).address_prefix,
+                "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+                                                                   project_conf['vpc_name'],
+                                                                   project_conf['subnet_name']).address_prefix,
                 "access": "Allow",
                 "priority": 110,
                 "direction": "Outbound"
@@ -638,18 +663,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['master_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['master_security_group_name'])
         except:
             print("Master Security group hasn't been created.")
-        append_result("Failed to create Security groups. Exception:" + str(err))
+        dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
         sys.exit(1)
 
     logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -664,20 +689,20 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
         try:
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['slave_security_group_name'])
         except:
             print("Slave Security group hasn't been created.")
-        append_result("Failed to create Security groups. Exception:" + str(err))
+        dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
         sys.exit(1)
 
     try:
@@ -688,21 +713,20 @@
                    project_conf['resource_group_name'], project_conf['region'])
         local("~/scripts/{}.py {}".format('common_create_storage_account', params))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create storage account.", str(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+        dlab.fab.append_result("Failed to create storage account.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
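+        # Rollback matches storage accounts by their "Name" tag rather than by resource name,
+        # since the actual account name presumably carries a generated suffix.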
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
             if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         sys.exit(1)
 
     try:
@@ -718,67 +742,71 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create storage account.", str(err))
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+        dlab.fab.append_result("Failed to create storage account.", str(err))
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
             if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
             if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
         try:
             logging.info('[CREATE DATA LAKE STORE DIRECTORY]')
             print('[CREATE DATA LAKE STORE DIRECTORY]')
-            params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} --service_base_name {}". \
-                format(project_conf['resource_group_name'], project_conf['datalake_store_name'],
-                       project_conf['datalake_user_directory_name'], project_conf['azure_ad_user_name'],
-                       project_conf['service_base_name'])
+            params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} " \
+                     "--service_base_name {}".format(project_conf['resource_group_name'],
+                                                     project_conf['datalake_store_name'],
+                                                     project_conf['datalake_user_directory_name'],
+                                                     project_conf['azure_ad_user_name'],
+                                                     project_conf['service_base_name'])
             try:
                 local("~/scripts/{}.py {}".format('common_create_datalake_directory', params))
             except:
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Data Lake Store directory.", str(err))
-            AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                         project_conf['private_subnet_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['master_security_group_name'])
-            AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                     project_conf['slave_security_group_name'])
-            for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+            dlab.fab.append_result("Failed to create Data Lake Store directory.", str(err))
+            AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                       project_conf['private_subnet_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['edge_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['notebook_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['master_security_group_name'])
+            AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                               project_conf['slave_security_group_name'])
+            for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
                 if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                    AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                    AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
                 if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                    AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                    AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
             try:
-                for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+                for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
                     if project_conf['datalake_store_name'] == datalake.tags["Name"]:
-                        AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
-            except Exception as err:
-                print('Error: {0}'.format(err))
+                        AzureActions.remove_datalake_directory(datalake.name,
+                                                               project_conf['datalake_user_directory_name'])
+            except:
                 print("Data Lake Store directory hasn't been created.")
             sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        project_conf['initial_user'] = 'ubuntu'
+        project_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        project_conf['initial_user'] = 'ec2-user'
+        project_conf['sudo_group'] = 'wheel'
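+    # initial_user/sudo_group map the OS family to the image's default login account and
+    # admin group; they are presumably used for the first SSH connection to the new instance.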
 
     try:
         logging.info('[CREATE EDGE INSTANCE]')
@@ -788,10 +816,12 @@
             --dlab_ssh_user_name {} --public_ip_name {} --public_key '''{}''' --primary_disk_size {} \
             --instance_type {} --project_name {} --instance_storage_account_type {} --image_name {} --tags '{}'".\
             format(project_conf['instance_name'], os.environ['azure_edge_instance_size'], project_conf['region'],
-                   project_conf['vpc_name'], project_conf['network_interface_name'], project_conf['edge_security_group_name'],
-                   project_conf['subnet_name'], project_conf['service_base_name'], project_conf['resource_group_name'],
-                   initial_user, project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
-                   project_conf['primary_disk_size'], 'edge', project_conf['project_name'], project_conf['instance_storage_account_type'],
+                   project_conf['vpc_name'], project_conf['network_interface_name'],
+                   project_conf['edge_security_group_name'], project_conf['subnet_name'],
+                   project_conf['service_base_name'], project_conf['resource_group_name'],
+                   project_conf['initial_user'], project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
+                   project_conf['primary_disk_size'], 'edge', project_conf['project_name'],
+                   project_conf['instance_storage_account_type'],
                    project_conf['image_name'], json.dumps(project_conf['instance_tags']))
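+        # Tags are passed JSON-encoded on the command line and presumably decoded again
+        # inside common_create_instance.py.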
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -799,27 +829,29 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            AzureActions().remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
+            AzureActions.remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
         except:
             print("The instance hasn't been created.")
-        AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
-                                     project_conf['private_subnet_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                             project_conf['master_security_group_name'])
-        AzureActions().remove_security_group(project_conf['resource_group_name'],
-                                                 project_conf['slave_security_group_name'])
-        for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+        AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+                                   project_conf['private_subnet_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['edge_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['notebook_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['master_security_group_name'])
+        AzureActions.remove_security_group(project_conf['resource_group_name'],
+                                           project_conf['slave_security_group_name'])
+        for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
             if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
             if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
-                AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+                AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
                 if project_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
-        append_result("Failed to create instance. Exception:" + str(err))
+                    AzureActions.remove_datalake_directory(datalake.name,
+                                                           project_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed to create instance.", str(err))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
index 202a68b..7354c3d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
@@ -22,128 +22,138 @@
 # ******************************************************************************
 
 import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import logging
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import requests
+import traceback
 
 
 def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
     print("Terminating EDGE, notebook and dataengine virtual machines")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             try:
                 if project_tag == vm.tags["project_tag"]:
-                    AzureActions().remove_instance(resource_group_name, vm.name)
+                    AzureActions.remove_instance(resource_group_name, vm.name)
                     print("Instance {} has been terminated".format(vm.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate edge instance.", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             try:
                 if project_tag == network_interface.tags["project_name"]:
-                    AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                    AzureActions.delete_network_if(resource_group_name, network_interface.name)
                     print("Network interface {} has been removed".format(network_interface.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces.", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             try:
                 if project_tag in static_public_ip.tags["project_tag"]:
-                    AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                    AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                     print("Static public IP {} has been removed".format(static_public_ip.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IP addresses.", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             try:
                 if project_tag in disk.tags["project_tag"]:
-                    AzureActions().remove_disk(resource_group_name, disk.name)
+                    AzureActions.remove_disk(resource_group_name, disk.name)
                     print("Disk {} has been removed".format(disk.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove disks.", str(err))
         sys.exit(1)
 
     print("Removing storage account")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             try:
                 if project_tag == storage_account.tags["project_tag"]:
-                    AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                    AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                     print("Storage account {} has been terminated".format(storage_account.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts.", str(err))
         sys.exit(1)
 
     print("Deleting Data Lake Store directory")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             try:
                 if service_base_name == datalake.tags["SBN"]:
-                    AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+                    AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
                     print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake.", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             try:
                 if project_tag == sg.tags["project_tag"]:
-                    AzureActions().remove_security_group(resource_group_name, sg.name)
+                    AzureActions.remove_security_group(resource_group_name, sg.name)
                     print("Security group {} has been terminated".format(sg.name))
             except:
                 pass
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups.", str(err))
         sys.exit(1)
 
     print("Removing private subnet")
     try:
-        AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+        AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
         print("Private subnet {} has been terminated".format(subnet_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets.", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
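+    # Shared AzureMeta/AzureActions instances now replace the per-call AzureMeta()/AzureActions()
+    # construction used throughout the old code.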
     project_conf = dict()
     project_conf['service_base_name'] = os.environ['conf_service_base_name']
     project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
-    project_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    project_conf['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-    project_conf['private_subnet_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-subnet'
+    project_conf['project_name'] = os.environ['project_name']
+    project_conf['project_tag'] = project_conf['project_name']
+    project_conf['endpoint_name'] = os.environ['endpoint_name']
+    project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+                                                                   project_conf['project_name'],
+                                                                   project_conf['endpoint_name'])
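+    # Names are now taken verbatim from the environment (no lower()/replace('_', '-'));
+    # normalization presumably happens upstream before these variables are set.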
     project_conf['vpc_name'] = os.environ['azure_vpc_name']
 
 
@@ -152,18 +162,22 @@
         print('[TERMINATE EDGE]')
         try:
             terminate_edge_node(project_conf['resource_group_name'], project_conf['service_base_name'],
-                                project_conf['project_tag'], project_conf['private_subnet_name'], project_conf['vpc_name'])
+                                project_conf['project_tag'], project_conf['private_subnet_name'],
+                                project_conf['vpc_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
+            raise Exception
     except:
         sys.exit(1)
 
     try:
         print('[KEYCLOAK PROJECT CLIENT DELETE]')
         logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
-        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
-        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                    os.environ['keycloak_realm_name'])
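+        # The token endpoint lives in the master realm, while client administration targets
+        # the realm configured via keycloak_realm_name.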
 
         keycloak_auth_data = {
             "username": os.environ['keycloak_user'],
@@ -173,7 +187,8 @@
         }
 
         client_params = {
-            "clientId": project_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'],
+            "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
         }
 
         keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -201,6 +216,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Failed to write results.", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
index a9d2e50..c4f31fe 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,31 +41,34 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
+        notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
                                              "SBN": notebook_config['service_base_name'],
@@ -85,7 +90,8 @@
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,46 +100,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                           notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
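+        # Public deployments now reach the edge node via its Azure DNS label
+        # (host-<name>.<region>.cloudapp.azure.com) instead of resolving its public IP.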
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        notebook_config['rstudio_pass'] = id_generator()
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
-
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
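+        # dlab.fab.id_generator() presumably yields a random string to serve as the RStudio password.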
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +146,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -160,9 +164,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -178,9 +181,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring R_STUDIO and all dependencies
@@ -190,7 +192,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9} " \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9} " \
             .format(instance_hostname, keyfile_name,
                     os.environ['azure_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -198,15 +200,14 @@
                     notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_rstudio_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure rstudio.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure rstudio.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -222,9 +223,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -235,12 +235,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -256,49 +255,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
-
-                params = "--hostname {} --keyfile {} --os_user {} --rstudio_pass {}" \
-                    .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
-                            notebook_config['rstudio_pass'])
-                local("~/scripts/{}.py {}".format('rstudio_change_pass', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
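
Note on the re-create path in the hunk above: after the image is built, the script re-runs common_prepare_notebook and then spins in `while not instance_running` with no pause between get_instance_status calls. A minimal sketch of the same wait with a sleep between polls; the stub class is a stand-in for dlab.meta_lib.AzureMeta so the snippet runs on its own:

    import time

    def wait_until_running(meta, resource_group, instance, poll_seconds=10):
        # Poll until Azure reports 'running', sleeping between calls instead
        # of hammering the API in a tight loop.
        while meta.get_instance_status(resource_group, instance) != 'running':
            time.sleep(poll_seconds)

    class _MetaStub(object):  # stand-in for dlab.meta_lib.AzureMeta
        def __init__(self):
            self.calls = 0
        def get_instance_status(self, resource_group, instance):
            self.calls += 1
            return 'starting' if self.calls < 3 else 'running'

    wait_until_running(_MetaStub(), 'example-rg', 'example-vm', poll_seconds=0)
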
@@ -323,18 +318,17 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         # generating output information
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         rstudio_ip_url = "http://" + ip_address + ":8787/"
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
         rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -377,7 +371,6 @@
                    "exploratory_pass": notebook_config['rstudio_pass']}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
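
The pattern running through this whole file (and the files below) is the same: every `AzureMeta()`/`AzureActions()` call used to construct a fresh helper object, rebuilding the Azure SDK clients each time, and the patch replaces that with one shared instance created at startup. A rough sketch of the before/after, using a hypothetical stub in place of the real dlab classes:

    class AzureMetaStub(object):
        # Stand-in for dlab.meta_lib.AzureMeta; in the real class __init__
        # reads service-principal credentials and builds the SDK clients,
        # which is the expensive part worth doing only once.
        def __init__(self):
            print("building Azure SDK clients")

        def get_private_ip_address(self, resource_group, instance):
            return "10.0.0.4"  # placeholder value

    # Before: a new helper (and a fresh __init__) on every call.
    ip = AzureMetaStub().get_private_ip_address("rg", "vm")

    # After: one module-level instance, as in `AzureMeta = dlab.meta_lib.AzureMeta()`.
    AzureMeta = AzureMetaStub()
    ip = AzureMeta.get_private_ip_address("rg", "vm")
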
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
index 95b78ef..30033b6 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
@@ -21,12 +21,15 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
 from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
 import traceback
 
 if __name__ == "__main__":
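
The import rework in the hunk above (repeated across these scripts) drops the `from dlab... import *` wildcards for qualified imports, so call sites like `dlab.fab.append_result(...)` show where each helper lives and star-imports can no longer shadow one another. The same idea shown with a standard-library module, since the dlab package only exists inside the DLab images:

    # Wildcard style hides the origin of names and risks collisions:
    #   from json import *
    #   print(dumps({"ok": True}))   # where does dumps come from?
    # Qualified style keeps it traceable, matching the new dlab.fab.* calls:
    import json
    print(json.dumps({"ok": True}))
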
@@ -36,49 +39,71 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
+    def clear_resources():
+        AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+        for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+                AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        if 'azure_security_group_name' not in os.environ:
+            AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+        if 'azure_subnet_name' not in os.environ:
+            AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                       ssn_conf['subnet_name'])
+        if 'azure_vpc_name' not in os.environ:
+            AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+        if 'azure_resource_group_name' not in os.environ:
+            AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
     try:
-        instance = 'ssn'
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
         
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
 
-        billing_enabled = True
-
-        ssn_conf = dict()
-        # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+        ssn_conf['billing_enabled'] = True
+        # We need to cut service_base_name to 20 symbols due to the Azure name length limitation
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         # Check azure predefined resources
-        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
+        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+                                                         '{}-resource-group'.format(ssn_conf['service_base_name']))
         ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
-        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
-        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(
+            ssn_conf['service_base_name']))
         # Default variables
         ssn_conf['region'] = os.environ['azure_region']
-        ssn_conf['ssn_container_name'] = '{}-ssn-container'.format(ssn_conf['service_base_name']).lower()
         ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
         ssn_conf['datalake_store_name'] = '{}-ssn-datalake'.format(ssn_conf['service_base_name'])
         ssn_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(ssn_conf['service_base_name'])
         ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
-        ssn_conf['ssh_key_path'] = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
+        ssn_conf['ssh_key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'],
+                                                                               ssn_conf['region'])
         if os.environ['conf_network_type'] == 'private':
-            ssn_conf['instnace_ip'] = AzureMeta().get_private_ip_address(ssn_conf['resource_group_name'],
-                                                                        ssn_conf['instance_name'])
+            ssn_conf['instnace_ip'] = AzureMeta.get_private_ip_address(ssn_conf['resource_group_name'],
+                                                                       ssn_conf['instance_name'])
+            ssn_conf['instance_host'] = ssn_conf['instnace_ip']
         else:
-            ssn_conf['instnace_ip'] = AzureMeta().get_instance_public_ip_address(ssn_conf['resource_group_name'],
-                                                                        ssn_conf['instance_name'])
-        ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'], ssn_conf['region'])
+            ssn_conf['instnace_ip'] = AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+                                                                               ssn_conf['instance_name'])
+            ssn_conf['instance_host'] = ssn_conf['instance_dns_name']
 
         if os.environ['conf_stepcerts_enabled'] == 'true':
-            step_cert_sans = ' --san {0} --san {1} '.format(AzureMeta().get_private_ip_address(
-                ssn_conf['resource_group_name'], ssn_conf['instance_name']), ssn_conf['instance_dns_name'])
+            ssn_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+                ssn_conf['resource_group_name'], ssn_conf['instance_name']))
             if os.environ['conf_network_type'] == 'public':
-                step_cert_sans += ' --san {0}'.format(
-                    AzureMeta().get_instance_public_ip_address(ssn_conf['resource_group_name'],
-                                                               ssn_conf['instance_name']))
+                ssn_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+                    AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+                                                             ssn_conf['instance_name']),
+                    ssn_conf['instance_dns_name'])
         else:
-            step_cert_sans = ''
+            ssn_conf['step_cert_sans'] = ''
 
         try:
             if os.environ['azure_offer_number'] == '':
@@ -90,62 +115,49 @@
             if os.environ['azure_region_info'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['azure_offer_number'] = 'None'
             os.environ['azure_currency'] = 'None'
             os.environ['azure_locale'] = 'None'
             os.environ['azure_region_info'] = 'None'
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
     except Exception as err:
-        print("Failed to generate variables dictionary." + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
         sys.exit(1)
 
-    def clear_resources():
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
-            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
-        AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
-
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
         params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+            (ssn_conf['instance_host'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'],
+             ssn_conf['sudo_group'])
         local("~/scripts/{}.py {}".format('create_ssh_user', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
         print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
-        params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml pycrypto azure==2.0.0' \
-            --user {} --region {}".format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'],
-                                          ssn_conf['dlab_ssh_user'], ssn_conf['region'])
+        params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml " \
+                 "pycrypto azure==2.0.0' --user {} --region {}".format(ssn_conf['instance_host'],
+                                                                       ssn_conf['ssh_key_path'],
+                                                                       ssn_conf['dlab_ssh_user'],
+                                                                       ssn_conf['region'])
         local("~/scripts/{}.py {}".format('install_prerequisites', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed installing software: pip, packages.", str(err))
+        dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
         sys.exit(1)
 
     try:
@@ -157,15 +169,14 @@
                              "subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
                  "--tag_resource_id {} --step_cert_sans '{}'". \
-            format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
+            format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
                    ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
-                   step_cert_sans)
+                   ssn_conf['step_cert_sans'])
         local("~/scripts/{}.py {}".format('configure_ssn_node', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Failed configuring ssn.", str(err))
+        dlab.fab.append_result("Failed configuring ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -181,28 +192,28 @@
                              {"name": "tensor", "tag": "latest"},
                              {"name": "deeplearning", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"}]
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} --cloud_provider {} --region {}". \
-            format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
-                   os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
-                   os.environ['conf_cloud_provider'], ssn_conf['region'])
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+                 "--cloud_provider {} --region {}".format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'],
+                                                          json.dumps(additional_config), os.environ['conf_os_family'],
+                                                          ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+                                                          os.environ['conf_cloud_provider'], ssn_conf['region'])
         local("~/scripts/{}.py {}".format('configure_docker', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Unable to configure docker.", str(err))
+        dlab.fab.append_result("Unable to configure docker.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CONFIGURE SSN INSTANCE UI]')
         print('[CONFIGURE SSN INSTANCE UI]')
-        azure_auth_path = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
-        ldap_login = 'false'
+        ssn_conf['azure_auth_path'] = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
+        ssn_conf['ldap_login'] = 'false'
 
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "https://{0}/".format(ssn_conf['instnace_ip'])
+                'value': "https://{0}/".format(ssn_conf['instance_host'])
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -387,11 +398,11 @@
                     'value': ''
                 })
             if os.environ['azure_oauth2_enabled'] == 'false':
-                ldap_login = 'true'
-            tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
-            subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
-            datalake_application_id = os.environ['azure_application_id']
-            datalake_store_name = None
+                ssn_conf['ldap_login'] = 'true'
+            ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+            ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+            ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+            ssn_conf['datalake_store_name'] = None
         else:
             cloud_params.append(
                 {
@@ -403,10 +414,10 @@
                     'key': 'AZURE_CLIENT_ID',
                     'value': os.environ['azure_application_id']
                 })
-            tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
-            subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
-            datalake_application_id = os.environ['azure_application_id']
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+            ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+            ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
                     datalake_store_name = datalake.name
         params = "--hostname {} --keyfile {} --dlab_path {} --os_user {} --os_family {} --request_id {} \
@@ -425,10 +436,9 @@
                    os.environ['keycloak_auth_server_url'])
         local("~/scripts/{}.py {}".format('configure_ui', params))
     except Exception as err:
-        #print('Error: {0}'.format(err))
         traceback.print_exc()
         clear_resources()
-        append_result("Unable to configure UI.", str(err))
+        dlab.fab.append_result("Unable to configure UI.", str(err))
         sys.exit(1)
 
     try:
@@ -445,21 +455,22 @@
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC Name: {}".format(ssn_conf['vpc_name']))
         print("Subnet Name: {}".format(ssn_conf['subnet_name']))
-        print("Firewall Names: {}".format(ssn_conf['security_group_name']))
+        print("Security groups Names: {}".format(ssn_conf['security_group_name']))
         print("SSN instance size: {}".format(os.environ['azure_ssn_instance_size']))
+        ssn_conf['datalake_store_full_name'] = 'None'
         if os.environ['azure_datalake_enable'] == 'true':
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    datalake_store_name = datalake.name
-            print("DataLake store name: {}".format(datalake_store_name))
+                    ssn_conf['datalake_store_full_name'] = datalake.name
+                    print("DataLake store name: {}".format(ssn_conf['datalake_store_full_name']))
             print("DataLake shared directory name: {}".format(ssn_conf['datalake_shared_directory_name']))
         print("Region: {}".format(ssn_conf['region']))
-        jenkins_url = "http://{}/jenkins".format(ssn_conf['instnace_ip'])
-        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instnace_ip'])
+        jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_host'])
+        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_host'])
         print("Jenkins URL: {}".format(jenkins_url))
         print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instnace_ip']))
-        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instnace_ip']))
+        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_host']))
+        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_host']))
 
         try:
             with open('jenkins_creds.txt') as f:
@@ -472,7 +483,7 @@
             if os.environ['azure_datalake_enable'] == 'false':
                 res = {"service_base_name": ssn_conf['service_base_name'],
                        "instance_name": ssn_conf['instance_name'],
-                       "instance_hostname": ssn_conf['instnace_ip'],
+                       "instance_hostname": ssn_conf['instance_host'],
                        "master_keyname": os.environ['conf_key_name'],
                        "vpc_id": ssn_conf['vpc_name'],
                        "subnet_id": ssn_conf['subnet_name'],
@@ -483,13 +494,13 @@
             else:
                 res = {"service_base_name": ssn_conf['service_base_name'],
                        "instance_name": ssn_conf['instance_name'],
-                       "instance_hostname": ssn_conf['instnace_ip'],
+                       "instance_hostname": ssn_conf['instance_host'],
                        "master_keyname": os.environ['conf_key_name'],
                        "vpc_id": ssn_conf['vpc_name'],
                        "subnet_id": ssn_conf['subnet_name'],
                        "security_id": ssn_conf['security_group_name'],
                        "instance_shape": os.environ['azure_ssn_instance_size'],
-                       "datalake_name": datalake_store_name,
+                       "datalake_name": ssn_conf['datalake_store_full_name'],
                        "datalake_shared_directory_name": ssn_conf['datalake_shared_directory_name'],
                        "region": ssn_conf['region'],
                        "action": "Create SSN instance"}
@@ -499,5 +510,6 @@
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
             format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], ssn_conf['instnace_ip'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
-    except:
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
         sys.exit(1)
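
The relocated clear_resources() above also reverses the teardown order so dependents are removed before their containers (instance and Data Lake first, then security group, subnet, VPC, and the resource group last), and each `not in os.environ` check skips resources the user supplied rather than DLab created. A self-contained sketch of that ordering; the stub replaces dlab.actions_lib.AzureActions and only prints what it would do:

    import os

    class _ActionsStub(object):
        # Stand-in for dlab.actions_lib.AzureActions; each method just logs.
        def __getattr__(self, name):
            def call(*args):
                print("{}{}".format(name, args))
            return call

    def clear_resources_sketch(conf, actions):
        actions.remove_instance(conf['resource_group_name'], conf['instance_name'])
        if 'azure_security_group_name' not in os.environ:  # only if DLab made it
            actions.remove_security_group(conf['resource_group_name'],
                                          conf['security_group_name'])
        if 'azure_subnet_name' not in os.environ:
            actions.remove_subnet(conf['resource_group_name'], conf['vpc_name'],
                                  conf['subnet_name'])
        if 'azure_vpc_name' not in os.environ:
            actions.remove_vpc(conf['resource_group_name'], conf['vpc_name'])
        if 'azure_resource_group_name' not in os.environ:
            # The resource group contains everything above, so it goes last.
            actions.remove_resource_group(conf['resource_group_name'], conf['region'])

    conf = {'resource_group_name': 'sbn-resource-group', 'instance_name': 'sbn-ssn',
            'security_group_name': 'sbn-sg', 'subnet_name': 'sbn-subnet',
            'vpc_name': 'sbn-vpc', 'region': 'westus2'}
    clear_resources_sketch(conf, _ActionsStub())
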
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
index bb6c793..408f423 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
@@ -21,13 +21,17 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
 from fabric.api import *
-from dlab.ssn_lib import *
 from Crypto.PublicKey import RSA
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 
 
 if __name__ == "__main__":
@@ -37,26 +41,33 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
-        instance = 'ssn'
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
 
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-
-        ssn_conf = dict()
         # Verify vpc deployment
-        if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_vpc_name') == None and os.environ.get('azure_source_vpc_name') == None:
+        if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_vpc_name') \
+                and not os.environ.get('azure_source_vpc_name'):
             raise Exception('Not possible to deploy private environment without predefined vpc or without source vpc')
-        if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_resource_group_name') == None and os.environ.get('azure_source_resource_group_name') == None:
-            raise Exception('Not possible to deploy private environment without predefined resource_group_name or source_group_name')
-        # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+        if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_resource_group_name') \
+                and not os.environ.get('azure_source_resource_group_name'):
+            raise Exception('Not possible to deploy private environment without predefined resource_group_name '
+                            'or source_group_name')
+        # We need to cut service_base_name to 20 symbols due to the Azure name length limitation
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'][:20], '-', True)
         # Check azure predefined resources
-        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
-        ssn_conf['source_resource_group_name'] = os.environ.get('azure_source_resource_group_name', ssn_conf['resource_group_name'])
+        ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+                                                         '{}-resource-group'.format(ssn_conf['service_base_name']))
+        ssn_conf['source_resource_group_name'] = os.environ.get(
+            'azure_source_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
         ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
-        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
-        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+        ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+        ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name',
+                                                         '{}-sg'.format(ssn_conf['service_base_name']))
         # Default variables
         ssn_conf['region'] = os.environ['azure_region']
         ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
@@ -69,10 +80,11 @@
         if os.environ['conf_network_type'] == 'private':
             ssn_conf['static_public_ip_name'] = 'None'      
         else:
-            ssn_conf['static_public_ip_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
-        key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+            ssn_conf['static_public_ip_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+        ssn_conf['key'] = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+                                                               os.environ['conf_key_name']), 'rb').read())
         ssn_conf['instance_storage_account_type'] = 'Premium_LRS'
-        ssn_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
+        ssn_conf['public_ssh_key'] = ssn_conf['key'].publickey().exportKey("OpenSSH")
         ssn_conf['instance_tags'] = {"Name": ssn_conf['instance_name'],
                                      "SBN": ssn_conf['service_base_name'],
                                      os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
@@ -82,11 +94,11 @@
                                            os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         ssn_conf['primary_disk_size'] = '32'
     except Exception as err:
-        print("Failed to generate variables dictionary." + str(err))
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
-    if AzureMeta().get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
-        print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if AzureMeta.get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
+        dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. Please try again.")
         sys.exit(1)
 
     try:
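
The key handling in the hunk above loads the deployment .pem with pycrypto and derives the OpenSSH-format public key Azure needs for the VM. A stand-alone version of the same two calls; it generates a throwaway key so the snippet runs without the real conf_key_dir/conf_key_name file:

    from Crypto.PublicKey import RSA  # pycrypto, as pinned by install_prerequisites

    # The real script reads '{conf_key_dir}{conf_key_name}.pem' instead.
    key = RSA.generate(2048)
    public_ssh_key = key.publickey().exportKey("OpenSSH")
    print(public_ssh_key)
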
@@ -100,8 +112,7 @@
             local("~/scripts/{}.py {}".format('ssn_create_resource_group', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating resource group: ' + str(err))
-        append_result("Failed to create Resource Group. Exception: " + str(err))
+        dlab.fab.append_result("Failed to create Resource Group.", str(err))
         sys.exit(1)
     
     try:
@@ -116,13 +127,12 @@
             local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating VPC: ' + str(err))
+        dlab.fab.append_result("Failed to create VPC.", str(err))
         try:
             if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
-            print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create VPC. Exception: " + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
   
     try:
@@ -138,15 +148,15 @@
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating Subnet: ' + str(err))
+        dlab.fab.append_result("Failed to create Subnet.", str(err))
         try:
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
             if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
-            print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create Subnet. Exception: " + str(err))
+            print("Resources hasn't been removed: {}".format(str(err)))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
     
     try:
@@ -154,20 +164,21 @@
             logging.info('[CREATING VPC PEERING]')
             print("[CREATING VPC PEERING]")
             params = "--source_resource_group_name {} --destination_resource_group_name {} " \
-            "--source_virtual_network_name {} --destination_virtual_network_name {}".format(ssn_conf['source_resource_group_name'], 
-                        ssn_conf['resource_group_name'], os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
+                     "--source_virtual_network_name {} --destination_virtual_network_name {}".format(
+                      ssn_conf['source_resource_group_name'], ssn_conf['resource_group_name'],
+                      os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
             local("~/scripts/{}.py {}".format('ssn_create_peering', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating VPC peering: ' + str(err))
         try:
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
             if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
             print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create VPC peering. Exception: " + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
+        dlab.fab.append_result("Failed to create VPC peering.", str(err))
         sys.exit(1)
 
     try:
@@ -229,18 +240,18 @@
             local("~/scripts/{}.py {}".format('common_create_security_group', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error creating Security group: ' + str(err))
+        dlab.fab.append_result("Error creating Security group", str(err))
         try:
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-            if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
             if 'azure_subnet_name' not in os.environ:
-                AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                             ssn_conf['subnet_name'])
+                AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                           ssn_conf['subnet_name'])
+            if 'azure_vpc_name' not in os.environ:
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
         except Exception as err:
             print("Resources hasn't been removed: " + str(err))
-        append_result("Failed to create Security group. Exception: " + str(err))
+            dlab.fab.append_result("Resources hasn't been removed.", str(err))
         sys.exit(1)
 
     if os.environ['azure_datalake_enable'] == 'true':
@@ -269,20 +280,19 @@
                 raise Exception
         except Exception as err:
             traceback.print_exc()
-            print('Error: {0}'.format(err))
-            if 'azure_resource_group_name' not in os.environ:
-                AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-            if 'azure_vpc_name' not in os.environ:
-                AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-            if 'azure_subnet_name' not in os.environ:
-                AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                                ssn_conf['subnet_name'])
-            if 'azure_security_group_name' not in os.environ:
-                AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-            for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+            dlab.fab.append_result("Failed to create Data Lake Store.", str(err))
+            for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
                 if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                    AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
-            append_result("Failed to create Data Lake Store. Exception:" + str(err))
+                    AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+            if 'azure_security_group_name' not in os.environ:
+                AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+            if 'azure_subnet_name' not in os.environ:
+                AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                           ssn_conf['subnet_name'])
+            if 'azure_vpc_name' not in os.environ:
+                AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+            if 'azure_resource_group_name' not in os.environ:
+                AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
             sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
@@ -308,22 +318,22 @@
         local("~/scripts/{}.py {}".format('common_create_instance', params))
     except Exception as err:
         traceback.print_exc()
-        print('Error: {0}'.format(err))
-        if 'azure_resource_group_name' not in os.environ:
-            AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
-        if 'azure_vpc_name' not in os.environ:
-            AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
-        if 'azure_subnet_name' not in os.environ:
-            AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
-                                            ssn_conf['subnet_name'])
-        if 'azure_security_group_name' not in os.environ:
-            AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
-        for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
-            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
-                AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        dlab.fab.append_result("Failed to create instance.", str(err))
         try:
-            AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+            AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
         except:
             print("The instance {} hasn't been created".format(ssn_conf['instance_name']))
-        append_result("Failed to create instance. Exception:" + str(err))
-        sys.exit(1)
\ No newline at end of file
+        for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+            if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+                AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+        if 'azure_security_group_name' not in os.environ:
+            AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+        if 'azure_subnet_name' not in os.environ:
+            AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+                                       ssn_conf['subnet_name'])
+        if 'azure_vpc_name' not in os.environ:
+            AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+        if 'azure_resource_group_name' not in os.environ:
+            AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
+        sys.exit(1)
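
Both ssn_prepare.py and ssn_configure.py now cut conf_service_base_name to 20 characters and pass it through dlab.fab.replace_multi_symbols before deriving every resource name from it. That helper's body is not part of this diff, so the sketch below only approximates its apparent contract (collapse runs of the symbol; with the trim flag, strip it from the ends):

    import re

    def replace_multi_symbols_sketch(text, symbol, trim=False):
        # Collapse runs like 'my--env' -> 'my-env'; optionally trim the ends.
        collapsed = re.sub(re.escape(symbol) + '+', symbol, text)
        return collapsed.strip(symbol) if trim else collapsed

    print(replace_multi_symbols_sketch('my--dlab--env-'[:20], '-', True))  # my-dlab-env
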
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
index bf2f91e..c709929 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
@@ -21,111 +21,115 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
 from fabric.api import *
-from dlab.ssn_lib import *
+import logging
+import traceback
+import json
 
 
 def terminate_ssn_node(resource_group_name, service_base_name, vpc_name, region):
     print("Terminating instances")
     try:
-        for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+        for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
             if service_base_name == vm.tags["SBN"]:
-                AzureActions().remove_instance(resource_group_name, vm.name)
+                AzureActions.remove_instance(resource_group_name, vm.name)
                 print("Instance {} has been terminated".format(vm.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing network interfaces")
     try:
-        for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+        for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
             if service_base_name == network_interface.tags["SBN"]:
-                AzureActions().delete_network_if(resource_group_name, network_interface.name)
+                AzureActions.delete_network_if(resource_group_name, network_interface.name)
                 print("Network interface {} has been removed".format(network_interface.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove network interfaces", str(err))
         sys.exit(1)
 
     print("Removing static public IPs")
     try:
-        for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+        for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
             if service_base_name == static_public_ip.tags["SBN"]:
-                AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+                AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
                 print("Static public IP {} has been removed".format(static_public_ip.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing disks")
     try:
-        for disk in AzureMeta().list_disks(resource_group_name):
+        for disk in AzureMeta.list_disks(resource_group_name):
             if service_base_name == disk.tags["SBN"]:
-                AzureActions().remove_disk(resource_group_name, disk.name)
+                AzureActions.remove_disk(resource_group_name, disk.name)
                 print("Disk {} has been removed".format(disk.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove disks", str(err))
         sys.exit(1)
 
     print("Removing storage accounts")
     try:
-        for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+        for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
             if service_base_name == storage_account.tags["SBN"]:
-                AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+                AzureActions.remove_storage_account(resource_group_name, storage_account.name)
                 print("Storage account {} has been terminated".format(storage_account.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove storage accounts", str(err))
         sys.exit(1)
 
     print("Removing Data Lake Store")
     try:
-        for datalake in AzureMeta().list_datalakes(resource_group_name):
+        for datalake in AzureMeta.list_datalakes(resource_group_name):
             if service_base_name == datalake.tags["SBN"]:
-                AzureActions().delete_datalake_store(resource_group_name, datalake.name)
+                AzureActions.delete_datalake_store(resource_group_name, datalake.name)
                 print("Data Lake Store {} has been terminated".format(datalake.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove Data Lake", str(err))
         sys.exit(1)
 
     print("Removing images")
     try:
-        for image in AzureMeta().list_images():
+        for image in AzureMeta.list_images():
             if service_base_name == image.tags["SBN"]:
-                AzureActions().remove_image(resource_group_name, image.name)
+                AzureActions.remove_image(resource_group_name, image.name)
                 print("Image {} has been removed".format(image.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove images", str(err))
         sys.exit(1)
 
     print("Removing security groups")
     try:
-        for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+        for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
             if service_base_name == sg.tags["SBN"]:
-                AzureActions().remove_security_group(resource_group_name, sg.name)
+                AzureActions.remove_security_group(resource_group_name, sg.name)
                 print("Security group {} has been terminated".format(sg.name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing VPC")
     try:
-        if AzureMeta().get_vpc(resource_group_name, service_base_name + '-vpc'):
-            AzureActions().remove_vpc(resource_group_name, vpc_name)
+        if AzureMeta.get_vpc(resource_group_name, service_base_name + '-vpc'):
+            AzureActions.remove_vpc(resource_group_name, vpc_name)
             print("VPC {} has been terminated".format(vpc_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove VPC", str(err))
         sys.exit(1)
 
     print("Removing Resource Group")
     try:
-        if AzureMeta().get_resource_group(service_base_name):
-            AzureActions().remove_resource_group(service_base_name, region)
+        if AzureMeta.get_resource_group(resource_group_name):
+            AzureActions.remove_resource_group(resource_group_name, region)
             print("Resource group {} has been terminated".format(vpc_name))
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove resource group", str(err))
         sys.exit(1)
 
 
@@ -136,12 +140,14 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    AzureMeta = dlab.meta_lib.AzureMeta()
+    AzureActions = dlab.actions_lib.AzureActions()
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
-    ssn_conf['resource_group_name'] = replace_multi_symbols(
-        os.environ['azure_resource_group_name'].replace('_', '-')[:12], '-', True)
+    ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(os.environ['conf_service_base_name'][:20],
+                                                                   '-', True)
+    ssn_conf['resource_group_name'] = os.environ.get(
+        'azure_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
     ssn_conf['region'] = os.environ['azure_region']
     ssn_conf['vpc_name'] = os.environ['azure_vpc_name']
 
@@ -155,8 +161,7 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -165,6 +170,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
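
A note on the recurring pattern above: every Azure script in this change stops constructing a fresh AzureMeta()/AzureActions() per call and instead builds each client once, shadowing the class name with the instance. A minimal sketch of that refactor, with a stand-in class since the dlab internals are not part of this diff:

# Stand-in sketch of the client-reuse refactor; the real classes live in
# dlab.meta_lib and dlab.actions_lib, so only the shape is mirrored here.
class AzureMeta:
    def __init__(self):
        # In the real class this is the costly part: credential lookup and
        # construction of the Azure SDK management clients, done once.
        self.network_client = None

    def get_vpc(self, resource_group_name, vpc_name):
        # Stand-in lookup; the real method returns None for a missing VPC.
        return None

# Shadow the class with one shared instance, exactly as the scripts now do:
AzureMeta = AzureMeta()

# Later calls reuse the same authenticated client, e.g.:
# AzureMeta.get_vpc('my-resource-group', 'my-sbn-vpc')
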
diff --git a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
index 107f52d..1c98eee 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -41,31 +43,34 @@
                         filename=local_log_filepath)
 
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['user_keyname'] = os.environ['project_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
                                              "SBN": notebook_config['service_base_name'],
@@ -87,7 +92,8 @@
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -96,44 +102,45 @@
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
                                    os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                       notebook_config['project_name'],
-                                                       notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                    edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        append_result("Failed to generate variables dictionary", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +148,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -152,16 +158,16 @@
         print('[CONFIGURE PROXY ON TENSOR INSTANCE]')
         additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
         params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
-            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+                    notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -177,9 +183,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -188,21 +193,20 @@
         print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
         params = "--hostname {0} --keyfile {1} " \
                  "--region {2} --os_user {3} " \
-                 "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+                 "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
                  .format(instance_hostname, keyfile_name,
                          os.environ['azure_region'], notebook_config['dlab_ssh_user'],
                          notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_tensor_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -218,9 +222,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -231,12 +234,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -252,44 +254,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -314,18 +317,17 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         tensorboard_url = "http://" + ip_address + ":6006/"
         jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
@@ -374,7 +376,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
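
Every failure branch in this script now follows one convention: report through dlab.fab.append_result, remove the half-provisioned instance, then exit non-zero. A hedged sketch of that convention folded into a helper; run_step is hypothetical, and notebook_config is assumed to be the dictionary built earlier in the script:

# Hypothetical helper condensing the repeated failure handling above.
import sys
import traceback

import dlab.fab
import dlab.actions_lib

AzureActions = dlab.actions_lib.AzureActions()

def run_step(description, func, *args, **kwargs):
    """Run one provisioning step; on failure report, tear down, exit."""
    try:
        return func(*args, **kwargs)
    except Exception as err:
        traceback.print_exc()
        dlab.fab.append_result("Failed {}".format(description), str(err))
        AzureActions.remove_instance(notebook_config['resource_group_name'],
                                     notebook_config['instance_name'])
        sys.exit(1)
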
diff --git a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
index e73c023..d870ecc 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,31 +42,34 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     try:
+        AzureMeta = dlab.meta_lib.AzureMeta()
+        AzureActions = dlab.actions_lib.AzureActions()
         notebook_config = dict()
         try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+            notebook_config['exploratory_name'] = os.environ['exploratory_name']
         except:
             notebook_config['exploratory_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
         notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
         notebook_config['key_name'] = os.environ['conf_key_name']
-        notebook_config['user_keyname'] = os.environ['project_name']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
-        notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name']
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['user_keyname'] = notebook_config['project_name']
         notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
-                                                                notebook_config['project_name'], os.environ['endpoint_name'],
-                                                                notebook_config['exploratory_name'])
+                                                                   notebook_config['project_name'],
+                                                                   notebook_config['endpoint_name'],
+                                                                   notebook_config['exploratory_name'])
         notebook_config['image_enabled'] = os.environ['conf_image_enabled']
         notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
         if notebook_config['shared_image_enabled'] == 'false':
             notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
                 notebook_config['service_base_name'],
-                notebook_config['endpoint_name'],
                 notebook_config['project_name'],
+                notebook_config['endpoint_name'],
                 os.environ['application'])
             notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
                                              "SBN": notebook_config['service_base_name'],
@@ -86,7 +91,8 @@
                                              os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
         notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
         notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
-                                                                      notebook_config['project_name'], os.environ['endpoint_name'])
+                                                                         notebook_config['project_name'],
+                                                                         notebook_config['endpoint_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['tags'] = {"Name": notebook_config['instance_name'],
                                    "SBN": notebook_config['service_base_name'],
@@ -94,46 +100,46 @@
                                    "project_tag": notebook_config['project_tag'],
                                    "endpoint_tag": notebook_config['endpoint_tag'],
                                    "Exploratory": notebook_config['exploratory_name'],
-                                   "product": "dlab"}
-        notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                         notebook_config['instance_name'])
 
         # generating variables regarding EDGE proxy on Notebook instance
-        instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                               notebook_config['instance_name'])
+        instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                             notebook_config['instance_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                        notebook_config['project_name'],
                                                        notebook_config['endpoint_name'])
-        edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                            edge_instance_name)
+        edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                          edge_instance_name)
+        notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+                                                                                           os.environ['azure_region'])
         if os.environ['conf_network_type'] == 'private':
-            edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                        edge_instance_name)
+            edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                      edge_instance_name)
         else:
-            edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
-                                                                                edge_instance_name)
+            edge_instance_hostname = notebook_config['edge_instance_dns_name']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-        edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                           edge_instance_name)
+        edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                         edge_instance_name)
 
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate variables dictionary.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +147,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -160,9 +165,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -178,9 +182,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -199,25 +202,24 @@
                  "--zeppelin_version {10} --scala_version {11} " \
                  "--livy_version {12} --multiple_clusters {13} " \
                  "--r_mirror {14} --endpoint_url {15} " \
-                 "--ip_adress {16} --exploratory_name {17} --edge_ip {18} " \
+                 "--ip_address {16} --exploratory_name {17} --edge_ip {18} " \
             .format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['azure_region'],
-                    json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
-                    os.environ['notebook_hadoop_version'], edge_instance_private_hostname, '3128',
-                    os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
-                    os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
-                    os.environ['notebook_r_mirror'], 'null',
+                    json.dumps(additional_config), notebook_config['dlab_ssh_user'],
+                    os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
+                    edge_instance_private_hostname, '3128', os.environ['notebook_zeppelin_version'],
+                    os.environ['notebook_scala_version'], os.environ['notebook_livy_version'],
+                    os.environ['notebook_multiple_clusters'], os.environ['notebook_r_mirror'], 'null',
                     notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
         try:
             local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
-            remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
-                               os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+            dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -233,9 +235,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -246,12 +247,11 @@
         try:
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     try:
@@ -267,44 +267,45 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to post configuring instance.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to post configuring instance.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+            image = AzureMeta.get_image(notebook_config['resource_group_name'],
+                                        notebook_config['expected_image_name'])
             if image == '':
                 print("Looks like it's first time we configure notebook server. Creating image.")
-                prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
-                                                          notebook_config['instance_name'],
-                                                          os.environ['azure_region'],
-                                                          notebook_config['expected_image_name'],
-                                                          json.dumps(notebook_config['image_tags']))
+                dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                      keyfile_name)
+                AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+                                                        notebook_config['instance_name'],
+                                                        os.environ['azure_region'],
+                                                        notebook_config['expected_image_name'],
+                                                        json.dumps(notebook_config['image_tags']))
                 print("Image was successfully created.")
                 local("~/scripts/{}.py".format('common_prepare_notebook'))
                 instance_running = False
                 while not instance_running:
-                    if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
-                                                       notebook_config['instance_name']) == 'running':
+                    if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+                                                     notebook_config['instance_name']) == 'running':
                         instance_running = True
-                instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                                       notebook_config['instance_name'])
-                remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
-                set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
-                              'http://{}:3128'.format(edge_instance_private_hostname))
+                instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                                     notebook_config['instance_name'])
+                dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+                                                    keyfile_name)
+                dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+                                       'http://{}:3128'.format(edge_instance_private_hostname))
                 additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
                 params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
                     .format(instance_hostname, notebook_config['instance_name'], keyfile_name,
                             json.dumps(additional_config), notebook_config['dlab_ssh_user'])
                 local("~/scripts/{}.py {}".format('common_configure_proxy', params))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
             sys.exit(1)
 
     try:
@@ -329,18 +330,17 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
         sys.exit(1)
 
     # generating output information
     try:
-        ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
-                                                        notebook_config['instance_name'])
+        ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+                                                      notebook_config['instance_name'])
         zeppelin_ip_url = "http://" + ip_address + ":8080/"
         ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
         zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -379,7 +379,6 @@
                    ]}
             result.write(json.dumps(res))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to generate output information.", str(err))
-        AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to generate output information.", str(err))
+        AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+        sys.exit(1)
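
Both notebook scripts also stop looking up the edge node's public IP and instead derive Azure's predictable cloudapp DNS label for it. Roughly, under the naming scheme in this diff (sample values are illustrative):

# Sketch of the new edge hostname selection; values are illustrative.
import os

os.environ.setdefault('azure_region', 'westus2')       # example region
os.environ.setdefault('conf_network_type', 'public')   # or 'private'
edge_instance_name = 'dlab-proj1-endp1-edge'           # example edge VM name

edge_dns_name = 'host-{}.{}.cloudapp.azure.com'.format(
    edge_instance_name, os.environ['azure_region'])

if os.environ['conf_network_type'] == 'private':
    # Private deployments still resolve an IP via AzureMeta.get_private_ip_address.
    edge_instance_hostname = '10.0.1.5'                # placeholder IP
else:
    edge_instance_hostname = edge_dns_name

print(edge_instance_hostname)
# host-dlab-proj1-endp1-edge.westus2.cloudapp.azure.com
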
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
index 010fad9..f39c138 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import traceback
 import uuid
+from fabric.api import *
+
+
+def clear_resources():
+    GCPActions.delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
+    GCPActions.remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
+                              os.environ['dataproc_version'], os.environ['conf_os_user'],
+                              notebook_config['key_path'])
 
 
 if __name__ == "__main__":
@@ -40,72 +49,72 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = os.environ['conf_service_base_name']
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+    notebook_config['edge_user_name'] = os.environ['edge_user_name']
+    notebook_config['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    notebook_config['project_tag'] = notebook_config['project_name']
+    notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+    notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
                                                                  notebook_config['endpoint_name'])
-    notebook_config['cluster_name'] = meta_lib.GCPMeta().get_not_configured_dataproc(notebook_config['notebook_name'])
-    notebook_config['notebook_ip'] = meta_lib.GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+    notebook_config['cluster_name'] = GCPMeta.get_not_configured_dataproc(notebook_config['notebook_name'])
+    notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
     notebook_config['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
     edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
                                                    notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = meta_lib.GCPMeta().get_private_ip_address(edge_instance_name)
+    edge_instance_hostname = GCPMeta.get_private_ip_address(edge_instance_name)
     if os.environ['application'] == 'deeplearning':
         application = 'jupyter'
     else:
         application = os.environ['application']
-    additional_tags = json.loads(os.environ['tags'].replace("': u'", "\": \"").replace("', u'", "\", \"").replace("{u'", "{\"" ).replace("'}", "\"}"))
 
-    if '@' in additional_tags['user_tag']:
-        notebook_config['user_tag'] = additional_tags['user_tag'][:additional_tags['user_tag'].find('@')]
-    else:
-        notebook_config['user_tag'] = additional_tags['user_tag']
+    additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace(
+        "{u'", "").replace("'}", "").lower()
 
-    notebook_config['custom_tag'] = additional_tags['custom_tag']
     notebook_config['cluster_labels'] = {
         os.environ['notebook_instance_name']: "configured",
         "name": notebook_config['cluster_name'],
         "sbn": notebook_config['service_base_name'],
-        "user": notebook_config['user_tag'],
         "notebook_name": os.environ['notebook_instance_name'],
-        "project_tag": notebook_config['project_tag'],
-        "endpoint_tag": notebook_config['endpoint_tag'],
         "product": "dlab",
-        "computational_name": (os.environ['computational_name']).lower().replace('_', '-')
+        "computational_name": (os.environ['computational_name'].replace('_', '-').lower())
     }
-    if notebook_config['custom_tag'] != '':
-        notebook_config['cluster_labels'].update({'custom_tag': notebook_config['custom_tag']})
+
+    for tag in additional_tags.split(','):
+        label_key = tag.split(':')[0]
+        label_value = tag.split(':')[1].replace('_', '-')
+        if '@' in label_value:
+            label_value = label_value[:label_value.find('@')]
+        if label_value != '':
+            notebook_config['cluster_labels'].update({label_key: label_value})
 
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} --edge_user_name {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+        params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} " \
+                 "--edge_user_name {} --project_name {} --os_user {}  --edge_hostname {} --proxy_port {} " \
+                 "--scala_version {} --application {} --pip_mirror {}" \
             .format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['dataproc_version'],
                     notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['gcp_region'],
-                    notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'], edge_instance_hostname, '3128',
-                    os.environ['notebook_scala_version'], os.environ['application'], os.environ['conf_pypi_mirror'])
+                    notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'],
+                    edge_instance_hostname, '3128', os.environ['notebook_scala_version'], os.environ['application'],
+                    os.environ['conf_pypi_mirror'])
         try:
             local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
-            actions_lib.GCPActions().update_dataproc_cluster(notebook_config['cluster_name'],
-                                                             notebook_config['cluster_labels'])
+            GCPActions.update_dataproc_cluster(notebook_config['cluster_name'], notebook_config['cluster_labels'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing Dataproc kernels.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
-        actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
-                                                os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataproc kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -123,11 +132,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Spark.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
-        actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
-                                                os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -137,6 +143,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
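
The Dataproc script replaces per-key tag handling with a single scrub of the stringified tag dict followed by a key:value loop that lowercases, strips e-mail domains, and drops empty values. A self-contained run of that exact logic on an example tags string:

# The same scrub-and-split the diff applies to os.environ['tags'];
# the input string here is an example of the u'-prefixed repr it expects.
tags_env = "{u'user_tag': u'Alice@example.com', u'custom_tag': u''}"

additional_tags = tags_env.replace("': u'", ":").replace("', u'", ",").replace(
    "{u'", "").replace("'}", "").lower()

cluster_labels = {}
for tag in additional_tags.split(','):
    label_key = tag.split(':')[0]
    label_value = tag.split(':')[1].replace('_', '-')
    if '@' in label_value:
        # keep only the local part of e-mail-like values
        label_value = label_value[:label_value.find('@')]
    if label_value != '':
        cluster_labels[label_key] = label_value

print(cluster_labels)  # {'user_tag': 'alice'}
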
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
index adf1f0b..08c4c02 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
+
+
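+# Roll-back helper: remove the Data Engine slave instances first, then the master node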
+def clear_resources():
+    for i in range(notebook_config['instance_count'] - 1):
+        slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+        GCPActions.remove_instance(slave_name, notebook_config['zone'])
+    GCPActions.remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
 
 
 if __name__ == "__main__":
@@ -40,53 +49,53 @@
                         filename=local_log_filepath)
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
         # generating variables dictionary
         print('Generating infrastructure names and tags')
         notebook_config = dict()
-        try:
-            notebook_config['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-        except:
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
             notebook_config['exploratory_name'] = ''
-        try:
-            notebook_config['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
             notebook_config['computational_name'] = ''
         notebook_config['service_base_name'] = os.environ['conf_service_base_name']
         notebook_config['region'] = os.environ['gcp_region']
         notebook_config['zone'] = os.environ['gcp_zone']
-        notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-        notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-        notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
-                                          '-de-' + notebook_config['exploratory_name'] + '-' + \
-                                          notebook_config['computational_name']
+        notebook_config['user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+        notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+                                                                  notebook_config['project_name'],
+                                                                  notebook_config['endpoint_name'],
+                                                                  notebook_config['computational_name'])
         notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
         notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
-        notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+        notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
         notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
         try:
-            notebook_config['spark_master_ip'] = GCPMeta().get_private_ip_address(notebook_config['master_node_name'])
-            notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+            notebook_config['spark_master_ip'] = GCPMeta.get_private_ip_address(notebook_config['master_node_name'])
+            notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
         except Exception as err:
-            print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to get instance IP address", str(err))
             sys.exit(1)
         notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
 
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
         sys.exit(1)
 
     try:
         logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
         print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
-        params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4} --keyfile {5}" \
-                 " --notebook_ip {6} --spark_master_ip {7}".\
+        params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
+                 " --keyfile {5} --notebook_ip {6} --spark_master_ip {7}".\
             format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
                    os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
                    notebook_config['spark_master_url'], notebook_config['key_path'],
@@ -97,12 +106,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed installing Dataengine kernels.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
         sys.exit(1)
 
     try:
@@ -122,12 +127,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(notebook_config['instance_count'] - 1):
-            slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
-        append_result("Failed to configure Spark.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure Spark.", str(err))
         sys.exit(1)
 
     try:
@@ -136,6 +137,7 @@
                    "Action": "Configure notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
index 39c93e1..c83208b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
@@ -24,10 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
 import os
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+
 
 if __name__ == "__main__":
     instance_class = 'notebook'
@@ -37,144 +40,159 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['region'] = os.environ['gcp_region']
-    notebook_config['zone'] = os.environ['gcp_zone']
-
-    edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                                          notebook_config['project_name'],
-                                                                          notebook_config['endpoint_tag']))
-    if edge_status != 'RUNNING':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                            ssn_hostname)
-        append_result("Edge node is unavailable")
-        sys.exit(1)
-
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        notebook_config = dict()
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['region'] = os.environ['gcp_region']
+        notebook_config['zone'] = os.environ['gcp_zone']
+
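+        # Abort early if the project's Edge node is not running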
+        edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                                            notebook_config['project_name'],
+                                                                            notebook_config['endpoint_tag']))
+        if edge_status != 'RUNNING':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = GCPMeta.get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            notebook_config['vpc_name'] = '{}-vpc'.format(notebook_config['service_base_name'])
+        if 'exploratory_name' in os.environ:
+            notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['subnet_name'] = '{0}-{1}-{2}-subnet'.format(notebook_config['service_base_name'],
+                                                                     notebook_config['project_name'],
+                                                                     notebook_config['endpoint_tag'])
+        notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['notebook_service_account_name'] = '{}-{}-{}-ps-sa'.format(notebook_config['service_base_name'],
+                                                                                   notebook_config['project_name'],
+                                                                                   notebook_config['endpoint_name'])
+
+        if os.environ['conf_os_family'] == 'debian':
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
+        elif os.environ['conf_os_family'] == 'redhat':
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
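+        # A deeplearning notebook needs a 30 GB primary disk; all other applications get 12 GB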
+        notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+            os.environ['application'])
+        notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+                os.environ['application'])
         else:
-            notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        notebook_config['vpc_name'] = '{}-ssn-vpc'.format(notebook_config['service_base_name'])
-    try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['subnet_name'] = '{0}-{1}-subnet'.format(notebook_config['service_base_name'],
-                                                             notebook_config['project_name'])
-    notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['notebook_service_account_name'] = '{}-{}-ps'.format(notebook_config['service_base_name'],
-                                                                         notebook_config['project_name']).replace('_', '-')
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
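+        # Use an explicitly requested primary image when one is given; otherwise fall back to the expected pre-built one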
+        notebook_config['notebook_primary_image_name'] = \
+            (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
+             else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
+        print('Searching pre-configured images')
+        notebook_config['primary_image_name'] = GCPMeta.get_image_by_name(
+            notebook_config['expected_primary_image_name'])
+        if notebook_config['primary_image_name'] == '':
+            notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        else:
+            print('Pre-configured primary image found. Using: {}'.format(
+                notebook_config['primary_image_name'].get('name')))
+            notebook_config['primary_image_name'] = 'global/images/{}'.format(
+                notebook_config['primary_image_name'].get('name'))
 
-    if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
-    if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
-    notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+        notebook_config['secondary_image_name'] = GCPMeta.get_image_by_name(
+            notebook_config['expected_secondary_image_name'])
+        if notebook_config['secondary_image_name'] == '':
+            notebook_config['secondary_image_name'] = 'None'
+        else:
+            print('Pre-configured secondary image found. Using: {}'.format(
+                notebook_config['secondary_image_name'].get('name')))
+            notebook_config['secondary_image_name'] = 'global/images/{}'.format(
+                notebook_config['secondary_image_name'].get('name'))
 
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-    notebook_config['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
-        else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
-    print('Searching pre-configured images')
-    notebook_config['primary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
-    if notebook_config['primary_image_name'] == '':
-        notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    else:
-        print('Pre-configured primary image found. Using: {}'.format(notebook_config['primary_image_name'].get('name')))
-        notebook_config['primary_image_name'] = 'global/images/{}'.format(notebook_config['primary_image_name'].get('name'))
+        notebook_config['gpu_accelerator_type'] = 'None'
 
-    notebook_config['secondary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_secondary_image_name'])
-    if notebook_config['secondary_image_name'] == '':
-        notebook_config['secondary_image_name'] = 'None'
-    else:
-        print('Pre-configured secondary image found. Using: {}'.format(notebook_config['secondary_image_name'].get('name')))
-        notebook_config['secondary_image_name'] = 'global/images/{}'.format(notebook_config['secondary_image_name'].get('name'))
+        if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+            notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
 
-    notebook_config['gpu_accelerator_type'] = 'None'
+        notebook_config['network_tag'] = '{0}-{1}-{2}-ps'.format(notebook_config['service_base_name'],
+                                                                 notebook_config['project_name'],
+                                                                 notebook_config['endpoint_name'])
 
-    if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
-        notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+        with open('/root/result.json', 'w') as f:
+            data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+            json.dump(data, f)
 
-    notebook_config['network_tag'] = '{0}-{1}-ps'.format(notebook_config['service_base_name'],
-                                                         notebook_config['project_name'])
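+        # Flatten the stringified tags dict (e.g. "{u'key': u'value'}") into lower-cased "key:value" pairs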
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
 
-    with open('/root/result.json', 'w') as f:
-        data = {"notebook_name": notebook_config['instance_name'], "error": ""}
-        json.dump(data, f)
+        print('Additional tags will be added: {}'.format(additional_tags))
+        notebook_config['labels'] = {"name": notebook_config['instance_name'],
+                                     "sbn": notebook_config['service_base_name'],
+                                     "product": "dlab"
+                                     }
 
-    additional_tags = json.loads(os.environ['tags'].replace("': u'", "\": \"").replace("', u'", "\", \"").replace("{u'", "{\"" ).replace("'}", "\"}"))
-
-    if '@' in additional_tags['user_tag']:
-        notebook_config['user_tag'] = additional_tags['user_tag'][:additional_tags['user_tag'].find('@')]
-    else:
-        notebook_config['user_tag'] = additional_tags['user_tag']
-
-    notebook_config['custom_tag'] = additional_tags['custom_tag']
-    print('Additional tags will be added: {}'.format(additional_tags))
-    notebook_config['labels'] = {"name": notebook_config['instance_name'],
-                                 "sbn": notebook_config['service_base_name'],
-                                 "project_tag": notebook_config['project_tag'],
-                                 "endpoint_tag": notebook_config['endpoint_tag'],
-                                 "user": notebook_config['user_tag'],
-                                 "product": "dlab"
-                                 }
-
-    if notebook_config['custom_tag'] != '':
-        notebook_config['labels'].update({'custom_tag': notebook_config['custom_tag']})
-
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
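+            # GCP label values cannot contain '@'; keep only the part before it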
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                notebook_config['labels'].update({label_key: label_value})
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
     # launching instance for notebook server
     try:
         logging.info('[CREATE NOTEBOOK INSTANCE]')
         print('[CREATE NOTEBOOK INSTANCE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
                  "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
-                 "--gpu_accelerator_type {14} --network_tag {15} --labels '{16}' --service_base_name {17}".\
+                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                 "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --labels '{16}' " \
+                 "--service_base_name {17}".\
             format(notebook_config['instance_name'], notebook_config['region'], notebook_config['zone'],
                    notebook_config['vpc_name'], notebook_config['subnet_name'], notebook_config['instance_size'],
-                   notebook_config['ssh_key_path'], initial_user, notebook_config['notebook_service_account_name'],
-                   notebook_config['primary_image_name'], notebook_config['secondary_image_name'], 'notebook',
-                   notebook_config['primary_disk_size'], notebook_config['secondary_disk_size'],
-                   notebook_config['gpu_accelerator_type'], notebook_config['network_tag'],
-                   json.dumps(notebook_config['labels']), notebook_config['service_base_name'])
+                   notebook_config['ssh_key_path'], notebook_config['initial_user'],
+                   notebook_config['notebook_service_account_name'], notebook_config['primary_image_name'],
+                   notebook_config['secondary_image_name'], 'notebook', notebook_config['primary_disk_size'],
+                   notebook_config['secondary_disk_size'], notebook_config['gpu_accelerator_type'],
+                   notebook_config['network_tag'], json.dumps(notebook_config['labels']),
+                   notebook_config['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_disk(notebook_config['instance_name'], notebook_config['zone'])
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_disk(notebook_config['instance_name'], notebook_config['zone'])
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
index b9c8a08..2d8fc8e 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
@@ -24,12 +24,14 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 import argparse
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -40,9 +42,11 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['zone'] = os.environ['gcp_zone']
 
@@ -51,10 +55,10 @@
         print('[START NOTEBOOK]')
         try:
             print("Starting notebook")
-            GCPActions().start_instance(notebook_config['notebook_name'], notebook_config['zone'])
+            GCPActions.start_instance(notebook_config['notebook_name'], notebook_config['zone'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start notebook.", str(err))
+            dlab.fab.append_result("Failed to start notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -62,7 +66,7 @@
     try:
         logging.info('[SETUP USER GIT CREDENTIALS]')
         print('[SETUP USER GIT CREDENTIALS]')
-        notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+        notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
             .format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -70,7 +74,7 @@
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to setup git credentials.", str(err))
+            dlab.fab.append_result("Failed to setup git credentials.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -84,7 +88,7 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -101,8 +105,6 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
index f336a0b..bcd431b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
@@ -24,9 +24,9 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import uuid
 import argparse
@@ -39,31 +39,31 @@
         labels = [
             {instance_name: '*'}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                computational_name = meta_lib.GCPMeta().get_cluster(cluster_name).get('labels').get(
+                computational_name = GCPMeta.get_cluster(cluster_name).get('labels').get(
                     'computational_name')
-                cluster = meta_lib.GCPMeta().get_list_cluster_statuses([cluster_name])
-                actions_lib.GCPActions().bucket_cleanup(bucket_name, project_name, cluster_name)
+                cluster = GCPMeta.get_list_cluster_statuses([cluster_name])
+                GCPActions.bucket_cleanup(bucket_name, project_name, cluster_name)
                 print('The bucket {} has been cleaned successfully'.format(bucket_name))
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
-                actions_lib.GCPActions().remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
-                                                        key_path, computational_name)
+                GCPActions.remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
+                                          key_path, computational_name)
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Stopping data engine cluster")
     try:
-        clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+        clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
         if clusters_list.get('items'):
             for vm in clusters_list['items']:
                 try:
-                    GCPActions().stop_instance(vm['name'], zone)
+                    GCPActions.stop_instance(vm['name'], zone)
                     print("Instance {} has been stopped".format(vm['name']))
                 except:
                     pass
@@ -71,15 +71,14 @@
             print("There are no data engine clusters to terminate.")
 
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine cluster", str(err))
         sys.exit(1)
 
     print("Stopping notebook")
     try:
-        GCPActions().stop_instance(instance_name, zone)
+        GCPActions.stop_instance(instance_name, zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop instance", str(err))
         sys.exit(1)
 
 
@@ -92,12 +91,14 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+    notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+    notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
@@ -115,7 +116,7 @@
                       notebook_config['project_name'])
     except Exception as err:
         print('Error: {0}'.format(err))
-        append_result("Failed to stop notebook.", str(err))
+        dlab.fab.append_result("Failed to stop notebook.", str(err))
         sys.exit(1)
 
     try:
@@ -124,7 +125,6 @@
                    "Action": "Stop notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
index 4b243a0..69af7bd 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -37,22 +38,22 @@
         labels = [
             {instance_name: '*'}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().bucket_cleanup(bucket_name, user_name, cluster_name)
+                GCPActions.bucket_cleanup(bucket_name, user_name, cluster_name)
                 print('The bucket {} has been cleaned successfully'.format(bucket_name))
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Terminating data engine cluster")
     try:
-        clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+        clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
         if clusters_list.get('items'):
             for vm in clusters_list['items']:
                 try:
@@ -64,15 +65,14 @@
             print("There are no data engine clusters to terminate.")
 
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Terminating notebook")
     try:
-        GCPActions().remove_instance(instance_name, zone)
+        GCPActions.remove_instance(instance_name, zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate notebook.", str(err))
+        dlab.fab.append_result("Failed to terminate instance", str(err))
         sys.exit(1)
 
 
@@ -84,12 +84,14 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     notebook_config = dict()
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+    notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+    notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     notebook_config['notebook_name'] = os.environ['notebook_instance_name']
     notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
                                                                  notebook_config['project_name'],
@@ -106,7 +108,7 @@
                          notebook_config['project_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate notebook.", str(err))
+            dlab.fab.append_result("Failed to terminate notebook.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -117,6 +119,6 @@
                    "Action": "Terminate notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
index 30f9a80..05b9c9b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
@@ -24,10 +24,11 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
-from dlab.notebook_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.notebook_lib
+import traceback
 import sys
 import os
 import logging
@@ -35,7 +36,7 @@
 
 
 def configure_dataengine_service(instance, dataproc_conf):
-    dataproc_conf['instance_ip'] = meta_lib.GCPMeta().get_private_ip_address(instance)
+    dataproc_conf['instance_ip'] = GCPMeta.get_private_ip_address(instance)
     # configuring proxy on Data Engine service
     try:
         logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
@@ -50,9 +51,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
     try:
@@ -62,16 +62,15 @@
             env['connection_attempts'] = 100
             env.key_filename = "{}".format(dataproc_conf['key_path'])
             env.host_string = dataproc_conf['dlab_ssh_user'] + '@' + dataproc_conf['instance_ip']
-            install_os_pkg(['python-pip', 'python3-pip'])
-            configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
-                                              dataproc_conf['key_path'])
+            dlab.notebook_lib.install_os_pkg(['python-pip', 'python3-pip'])
+            dlab.fab.configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
+                                                       dataproc_conf['key_path'])
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure dataengine service.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
     try:
@@ -79,7 +78,7 @@
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         slaves = []
         for idx, instance in enumerate(dataproc_conf['cluster_core_instances']):
-            slave_ip = meta_lib.GCPMeta().get_private_ip_address(instance)
+            slave_ip = GCPMeta.get_private_ip_address(instance)
             slave = {
                 'name': 'datanode{}'.format(idx + 1),
                 'ip': slave_ip,
@@ -108,12 +107,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure reverse proxy.", str(err))
-        actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
 
 
@@ -124,55 +122,66 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.INFO,
                         filename=local_log_filepath)
-    print('Generating infrastructure names and tags')
-    dataproc_conf = dict()
     try:
-        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['exploratory_name'] = ''
-    try:
-        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['computational_name'] = ''
-    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['key_name'] = os.environ['conf_key_name']
-    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    dataproc_conf['region'] = os.environ['gcp_region']
-    dataproc_conf['zone'] = os.environ['gcp_zone']
-    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],
-                                                      dataproc_conf['project_name'])
-    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],
-                                                                 dataproc_conf['project_name'],
-                                                                 dataproc_conf['exploratory_name'],
-                                                                 dataproc_conf['computational_name'])
-    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                       dataproc_conf['project_name'])
-    dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        dataproc_conf = dict()
+        if 'exploratory_name' in os.environ:
+            dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['computational_name'] = ''
+        dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+        dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        dataproc_conf['key_name'] = os.environ['conf_key_name']
+        dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        dataproc_conf['region'] = os.environ['gcp_region']
+        dataproc_conf['zone'] = os.environ['gcp_zone']
+        dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+                                                              dataproc_conf['project_name'],
+                                                              dataproc_conf['endpoint_name'])
+        dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+                                                                     dataproc_conf['project_name'],
+                                                                     dataproc_conf['endpoint_name'],
+                                                                     dataproc_conf['computational_name'])
+        dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
                                                                dataproc_conf['project_name'],
                                                                dataproc_conf['endpoint_name'])
-    dataproc_conf['release_label'] = os.environ['dataproc_version']
-    dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
-    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['project_name'])
-    dataproc_conf['dataproc_unique_index'] = GCPMeta().get_index_by_service_account_name(dataproc_conf['dataproc_service_account_name'])
-    service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['dataproc_unique_index'],
-                                                                         os.environ['gcp_project_id'])
+        dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+                                                                   dataproc_conf['project_name'],
+                                                                   dataproc_conf['endpoint_name'])
+        dataproc_conf['release_label'] = os.environ['dataproc_version']
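+        # Label the new cluster with its parent notebook name; the label is updated once kernels are configured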
+        dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
+        dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+                                                                                    dataproc_conf['project_name'],
+                                                                                    dataproc_conf['endpoint_name'])
+        dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            dataproc_conf['dataproc_service_account_name'])
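+        # Service account e-mails follow the <base-name>-<index>@<project>.iam.gserviceaccount.com pattern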
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+                                                                          dataproc_conf['dataproc_unique_index'],
+                                                                          os.environ['gcp_project_id'])
 
-    dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
-                                                                    dataproc_conf['project_name'],
-                                                                    dataproc_conf['endpoint_name'])
-    dataproc_conf['edge_instance_hostname'] = GCPMeta().get_instance_public_ip_by_name(
-        dataproc_conf['edge_instance_name'])
-    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
-    dataproc_conf['master_ip'] = meta_lib.GCPMeta().get_private_ip_address(dataproc_conf['master_name'])
+        dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+                                                                        dataproc_conf['project_name'],
+                                                                        dataproc_conf['endpoint_name'])
+        dataproc_conf['edge_instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(
+            dataproc_conf['edge_instance_name'])
+        dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
+        dataproc_conf['master_ip'] = GCPMeta.get_private_ip_address(dataproc_conf['master_name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        sys.exit(1)
 
     try:
-        res = meta_lib.GCPMeta().get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
+        res = GCPMeta.get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
         dataproc_conf['cluster_instances'] = [i.get('name') for i in res['items']]
     except Exception as err:
         traceback.print_exc()
@@ -194,7 +203,9 @@
         for job in jobs:
             if job.exitcode != 0:
                 raise Exception
-    except:
+    except Exception as err:
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+        dlab.fab.append_result("Failed to configure Dataengine-service", str(err))
         traceback.print_exc()
         raise Exception
 
@@ -230,6 +241,7 @@
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
index 004a1c9..7b9d05a 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
@@ -57,7 +57,7 @@
     job_body['job']['placement']['clusterName'] = cluster_name
     job_body['job']['pysparkJob']['mainPythonFileUri'] = 'gs://{}/jars_parser.py'.format(args.bucket)
     job_body['job']['pysparkJob']['args'][1] = args.bucket
-    job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).lower().replace('_', '-')
+    job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).replace('_', '-').lower()
     job_body['job']['pysparkJob']['args'][5] = cluster_name
     job_body['job']['pysparkJob']['args'][7] = cluster_version
     job_body['job']['pysparkJob']['args'][9] = os.environ['conf_os_user']
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
index 17fc776..993b8e7 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -42,91 +43,99 @@
                         level=logging.INFO,
                         filename=local_log_filepath)
     try:
-        os.environ['exploratory_name']
-    except:
-        os.environ['exploratory_name'] = ''
-    if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
-        time.sleep(30)
-
-    print('Generating infrastructure names and tags')
-    dataproc_conf = dict()
-    try:
-        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['exploratory_name'] = ''
-    try:
-        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
-    except:
-        dataproc_conf['computational_name'] = ''
-    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    dataproc_conf['key_name'] = os.environ['conf_key_name']
-    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    dataproc_conf['region'] = os.environ['gcp_region']
-    dataproc_conf['zone'] = os.environ['gcp_zone']
-    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
-    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'],
-                                                                 dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])
-    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
-    dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        dataproc_conf = dict()
+        if 'exploratory_name' in os.environ:
+            dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            dataproc_conf['computational_name'] = ''
+        if os.path.exists('/response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name'])):
+            time.sleep(30)
+        dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+        dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        dataproc_conf['project_tag'] = dataproc_conf['project_name']
+        dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        dataproc_conf['endpoint_tag'] = dataproc_conf['endpoint_name']
+        dataproc_conf['key_name'] = os.environ['conf_key_name']
+        dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        dataproc_conf['region'] = os.environ['gcp_region']
+        dataproc_conf['zone'] = os.environ['gcp_zone']
+        dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+                                                              dataproc_conf['project_name'],
+                                                              dataproc_conf['endpoint_name'])
+        dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+                                                                     dataproc_conf['project_name'],
+                                                                     dataproc_conf['endpoint_name'],
+                                                                     dataproc_conf['computational_name'])
+        dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
                                                                dataproc_conf['project_name'],
                                                                dataproc_conf['endpoint_name'])
-    dataproc_conf['release_label'] = os.environ['dataproc_version']
-    additional_tags = json.loads(os.environ['tags'].replace("': u'", "\": \"").replace("', u'", "\", \"").replace("{u'", "{\"" ).replace("'}", "\"}"))
+        dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+                                                                   dataproc_conf['project_name'],
+                                                                   dataproc_conf['endpoint_name'])
+        dataproc_conf['release_label'] = os.environ['dataproc_version']
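+        # Flatten the stringified tags dict (e.g. "{u'key': u'value'}") into "key:value,..." form for parsing below.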
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
 
-    if '@' in additional_tags['user_tag']:
-        dataproc_conf['user_tag'] = additional_tags['user_tag'][:additional_tags['user_tag'].find('@')]
-    else:
-        dataproc_conf['user_tag'] = additional_tags['user_tag']
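+        # Base labels applied to the Dataproc cluster; user-supplied tags are merged in below.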
+        dataproc_conf['cluster_labels'] = {
+            os.environ['notebook_instance_name']: "not-configured",
+            "name": dataproc_conf['cluster_name'],
+            "sbn": dataproc_conf['service_base_name'],
+            "notebook_name": os.environ['notebook_instance_name'],
+            "product": "dlab",
+            "computational_name": dataproc_conf['computational_name']
+        }
 
-    dataproc_conf['custom_tag'] = additional_tags['custom_tag']
-    dataproc_conf['cluster_labels'] = {
-        os.environ['notebook_instance_name']: "not-configured",
-        "name": dataproc_conf['cluster_name'],
-        "sbn": dataproc_conf['service_base_name'],
-        "user": dataproc_conf['user_tag'],
-        "project_tag": dataproc_conf['project_tag'],
-        "endpoint_tag": dataproc_conf['endpoint_tag'],
-        "notebook_name": os.environ['notebook_instance_name'],
-        "product": "dlab",
-        "computational_name": dataproc_conf['computational_name']
-    }
-    if dataproc_conf['custom_tag'] != '':
-        dataproc_conf['cluster_labels'].update({'custom_tag': dataproc_conf['custom_tag']})
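+        # Split the flattened tags into key:value labels, stripping e-mail domains and skipping empty values.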
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                dataproc_conf['cluster_labels'].update({label_key: label_value})
+        dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+                                                                                    dataproc_conf['project_name'],
+                                                                                    dataproc_conf['endpoint_name'])
+        dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            dataproc_conf['dataproc_service_account_name'])
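+        # The service-account e-mail is derived from the base name and the account's unique numeric index.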
+        service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+                                                                          dataproc_conf['dataproc_unique_index'],
+                                                                          os.environ['gcp_project_id'])
+        dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+                                                                            dataproc_conf['project_name'],
+                                                                            dataproc_conf['endpoint_name'])
+        dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary. Exception:" + str(err))
+        sys.exit(1)
 
-    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['project_name'])
-    dataproc_conf['dataproc_unique_index'] = GCPMeta().get_index_by_service_account_name(dataproc_conf['dataproc_service_account_name'])
-    service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
-                                                                         dataproc_conf['dataproc_unique_index'],
-                                                                         os.environ['gcp_project_id'])
-    dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
-                                                                        dataproc_conf['project_name'],
-                                                                        dataproc_conf['endpoint_name'])
-    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-
-    edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
+    edge_status = GCPMeta.get_instance_status(dataproc_conf['edge_instance_hostname'])
     if edge_status != 'RUNNING':
         logging.info('ERROR: Edge node is unavailable! Aborting...')
         print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
-        append_result("Edge node is unavailable")
+        ssn_hostname = GCPMeta.get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
+        dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                     ssn_hostname)
+        dlab.fab.append_result("Edge node is unavailable")
         sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+    print("Will create exploratory environment with edge node as access point as following: ".format(
+        json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
     logging.info(json.dumps(dataproc_conf))
 
     try:
-        meta_lib.GCPMeta().dataproc_waiter(dataproc_conf['cluster_labels'])
+        GCPMeta.dataproc_waiter(dataproc_conf['cluster_labels'])
-        local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+        local('touch /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
     except Exception as err:
         traceback.print_exc()
-        append_result("Dataproc waiter fail.", str(err))
+        dlab.fab.append_result("Dataproc waiter fail.", str(err))
         sys.exit(1)
 
     local("echo Waiting for changes to propagate; sleep 10")
@@ -144,14 +153,16 @@
     dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
     dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
     if int(os.environ['dataproc_preemptible_count']) != 0:
-        dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
+        dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(
+            os.environ['dataproc_preemptible_count'])
     else:
         del dataproc_cluster['config']['secondaryWorkerConfig']
     dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
-    ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['project_name'] + '.pub').read()
+    ssh_user_pubkey = open('{}{}.pub'.format(os.environ['conf_key_dir'], dataproc_conf['project_name'])).read()
     key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
     ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
-    dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
+    dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(
+        dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
     dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']
     with open('/root/result.json', 'w') as f:
         data = {"hostname": dataproc_conf['cluster_name'], "error": ""}
@@ -160,7 +171,9 @@
     try:
         logging.info('[Creating Dataproc Cluster]')
         print('[Creating Dataproc Cluster]')
-        params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))
+        params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'],
+                                                                   dataproc_conf['bucket_name'],
+                                                                   json.dumps(dataproc_cluster))
 
         try:
             local("~/scripts/{}.py {}".format('dataengine-service_create', params))
@@ -171,7 +184,6 @@
         keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
-        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create Dataproc Cluster.", str(err))
+        dlab.fab.append_result("Failed to create Dataproc Cluster.", str(err))
-        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
+        local('rm /response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name']))
         sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
index 4247234..3710b1c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
@@ -21,31 +21,34 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
 import boto3
 import argparse
 import sys
+import json
 import os
 
 
 def terminate_dataproc_cluster(notebook_name, dataproc_name, bucket_name, ssh_user, key_path):
     print('Terminating Dataproc cluster and cleaning Dataproc config from bucket')
     try:
-        cluster = meta_lib.GCPMeta().get_list_cluster_statuses([dataproc_name])
+        cluster = GCPMeta.get_list_cluster_statuses([dataproc_name])
         if cluster[0]['status'] == 'running':
-            computational_name = meta_lib.GCPMeta().get_cluster(dataproc_name).get('labels').get('computational_name')
-            actions_lib.GCPActions().bucket_cleanup(bucket_name, os.environ['project_name'], dataproc_name)
+            computational_name = GCPMeta.get_cluster(dataproc_name).get('labels').get('computational_name')
+            GCPActions.bucket_cleanup(bucket_name, dataproc_conf['project_name'], dataproc_name)
             print('The bucket {} has been cleaned successfully'.format(bucket_name))
-            actions_lib.GCPActions().delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
+            GCPActions.delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
             print('The Dataproc cluster {} has been terminated successfully'.format(dataproc_name))
-            actions_lib.GCPActions().remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
+            GCPActions.remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
-                                                    key_path, computational_name)
+                                      key_path, computational_name)
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
         sys.exit(1)
 
 
@@ -58,12 +61,14 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
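+    # Module-level handles: terminate_dataproc_cluster() above relies on these globals.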
     print('Generating infrastructure names and tags')
     dataproc_conf = dict()
     dataproc_conf['service_base_name'] = os.environ['conf_service_base_name']
-    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+    dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     dataproc_conf['dataproc_name'] = os.environ['dataproc_cluster_name']
     dataproc_conf['gcp_project_id'] = os.environ['gcp_project_id']
     dataproc_conf['gcp_region'] = os.environ['gcp_region']
@@ -79,13 +84,13 @@
         print('[TERMINATE DATAPROC CLUSTER]')
         try:
             terminate_dataproc_cluster(dataproc_conf['notebook_name'], dataproc_conf['dataproc_name'],
-                                       dataproc_conf['bucket_name'], os.environ['conf_os_user'], dataproc_conf['key_path'])
+                                       dataproc_conf['bucket_name'], os.environ['conf_os_user'],
+                                       dataproc_conf['key_path'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Dataproc cluster.", str(err))
+            dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
             raise Exception
-    except Exception as err:
-        print('Error: {0}'.format(err))
+    except Exception:
         sys.exit(1)
 
     try:
@@ -96,6 +101,6 @@
                    "Action": "Terminate Dataproc cluster"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
index bbbd6de..d50e0f0 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
@@ -24,9 +24,10 @@
 import json
 import time
 from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import sys
 import os
 import uuid
@@ -37,7 +38,7 @@
 
 def configure_slave(slave_number, data_engine):
     slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
-    slave_hostname = GCPMeta().get_private_ip_address(slave_name)
+    slave_hostname = GCPMeta.get_private_ip_address(slave_name)
     try:
         logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
         print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create ssh user on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON SLAVE NODE]')
         logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
             slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -70,15 +67,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install ssh user key on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user key on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -94,12 +87,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure proxy on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -114,13 +103,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install prerequisites on slave.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
         sys.exit(1)
 
     try:
@@ -138,16 +122,18 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring slave node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure slave node.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure slave node.", str(err))
         sys.exit(1)
 
 
+def clear_resources():
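+    # Remove every slave instance, then the master, to roll back a failed Data Engine deployment.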
+    for i in range(data_engine['instance_count'] - 1):
+        slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+        GCPActions.remove_instance(slave_name, data_engine['zone'])
+    GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+
+
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
                                                os.environ['request_id'])
@@ -157,12 +143,15 @@
                         filename=local_log_filepath)
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
         print('Generating infrastructure names and tags')
         data_engine = dict()
-        data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-        data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-        data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-        data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+        data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+        data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+        data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
         data_engine['region'] = os.environ['gcp_region']
         data_engine['zone'] = os.environ['gcp_zone']
         try:
@@ -171,24 +160,26 @@
             else:
                 data_engine['vpc_name'] = os.environ['gcp_vpc_name']
         except KeyError:
-            data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
-        try:
-            data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-        except:
+            data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+        else:
             data_engine['exploratory_name'] = ''
-        try:
-            data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-        except:
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
             data_engine['computational_name'] = ''
 
-        data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
-                                                             data_engine['project_name'])
+        data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+                                                                 data_engine['project_name'],
+                                                                 data_engine['endpoint_name'])
         data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
         data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
         data_engine['key_name'] = os.environ['conf_key_name']
         data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
-        data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
-                                                                           data_engine['project_name'])
+        data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+                                                                                 data_engine['project_name'],
+                                                                                 data_engine['endpoint_name'])
 
         if os.environ['conf_os_family'] == 'debian':
             initial_user = 'ubuntu'
@@ -196,9 +187,10 @@
         if os.environ['conf_os_family'] == 'redhat':
             initial_user = 'ec2-user'
             sudo_group = 'wheel'
-        data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
-                                      '-de-' + data_engine['exploratory_name'] + '-' + \
-                                      data_engine['computational_name']
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
         data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
         data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
         data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
@@ -206,23 +198,19 @@
         data_engine['gpu_accelerator_type'] = 'None'
         if os.environ['application'] in ('tensor', 'deeplearning'):
             data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
-        data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
-                                                         data_engine['project_name'])
-        master_node_hostname = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+        data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+                                                             data_engine['project_name'],
+                                                             data_engine['endpoint_name'])
+        master_node_hostname = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
         edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
                                                        data_engine['project_name'], data_engine['endpoint_tag'])
-        edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-        edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
         data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
         keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        print("Failed to generate variables dictionary.")
-        append_result("Failed to generate variables dictionary.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
         sys.exit(1)
 
     try:
@@ -238,33 +226,26 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to create ssh user on master.", str(err))
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY ON MASTER NODE]')
         logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": data_engine['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
+            master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem",
+            json.dumps(additional_config), data_engine['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install ssh user on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install ssh user on master.", str(err))
         sys.exit(1)
 
     try:
@@ -280,12 +261,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to configure proxy on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to configure proxy on master.", str(err))
         sys.exit(1)
 
     try:
@@ -300,13 +277,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to install prerequisites on master.", str(err))
+        clear_resources()
+        dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
         sys.exit(1)
 
     try:
@@ -324,12 +296,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure master node", str(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure master node", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -344,17 +312,14 @@
             if job.exitcode != 0:
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure slave nodes", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
         logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
-        notebook_instance_ip = GCPMeta().get_private_ip_address(data_engine['notebook_name'])
+        notebook_instance_ip = GCPMeta.get_private_ip_address(data_engine['notebook_name'])
         additional_info = {
             "computational_name": data_engine['computational_name'],
             "master_node_hostname": master_node_hostname,
@@ -379,18 +344,15 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        for i in range(data_engine['instance_count'] - 1):
-            slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            GCPActions().remove_instance(slave_name, data_engine['zone'])
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        ip_address = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+        ip_address = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
         spark_master_url = "http://" + ip_address + ":8080"
         spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
             data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
@@ -416,6 +378,7 @@
                    }
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        clear_resources()
+        sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
index 4046abb..262868c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
@@ -24,11 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import argparse
+from fabric.api import *
 
 if __name__ == "__main__":
     instance_class = 'notebook'
@@ -38,146 +40,152 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    data_engine = dict()
-    data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    data_engine['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    data_engine['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    data_engine['region'] = os.environ['gcp_region']
-    data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['endpoint_name'] = os.environ['endpoint_name']
-
-    edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
-                                                                          data_engine['project_name'],
-                                                                          data_engine['endpoint_name']))
-    if edge_status != 'RUNNING':
-        logging.info('ERROR: Edge node is unavailable! Aborting...')
-        print('ERROR: Edge node is unavailable! Aborting...')
-        ssn_hostname = GCPMeta().get_private_ip_address(data_engine['service_base_name'] + '-ssn')
-        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
-                            ssn_hostname)
-        append_result("Edge node is unavailable")
-        sys.exit(1)
-
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        data_engine = dict()
+        data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+        data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+        data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        data_engine['project_tag'] = data_engine['project_name']
+        data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+        data_engine['endpoint_tag'] = data_engine['endpoint_name']
+        data_engine['region'] = os.environ['gcp_region']
+        data_engine['zone'] = os.environ['gcp_zone']
+
+        edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+                                                                            data_engine['project_name'],
+                                                                            data_engine['endpoint_name']))
+        if edge_status != 'RUNNING':
+            logging.info('ERROR: Edge node is unavailable! Aborting...')
+            print('ERROR: Edge node is unavailable! Aborting...')
+            ssn_hostname = GCPMeta.get_private_ip_address(data_engine['service_base_name'] + '-ssn')
+            dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+                                         ssn_hostname)
+            dlab.fab.append_result("Edge node is unavailable")
+            sys.exit(1)
+
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                data_engine['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+        if 'exploratory_name' in os.environ:
+            data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
         else:
-            data_engine['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
-    try:
-        data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
-        data_engine['computational_name'] = ''
+            data_engine['exploratory_name'] = ''
+        if 'computational_name' in os.environ:
+            data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+        else:
+            data_engine['computational_name'] = ''
 
-    data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
-                                                         data_engine['project_name'])
-    data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
-    data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
-    data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
-                                                                       data_engine['project_name'])
+        data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+                                                                 data_engine['project_name'],
+                                                                 data_engine['endpoint_name'])
+        data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
+        data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
+        data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+                                                                                 data_engine['project_name'],
+                                                                                 data_engine['endpoint_name'])
 
-    if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
-    if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
-    data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
-                                  '-de-' + data_engine['exploratory_name'] + '-' + \
-                                  data_engine['computational_name']
-    data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
-    data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
-    data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
-    data_engine['notebook_name'] = os.environ['notebook_instance_name']
+        if os.environ['conf_os_family'] == 'debian':
+            initial_user = 'ubuntu'
+            sudo_group = 'sudo'
+        if os.environ['conf_os_family'] == 'redhat':
+            initial_user = 'ec2-user'
+            sudo_group = 'wheel'
+        data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                              data_engine['project_name'],
+                                                              data_engine['endpoint_name'],
+                                                              data_engine['computational_name'])
+        data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
+        data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
+        data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
+        data_engine['notebook_name'] = os.environ['notebook_instance_name']
 
-    data_engine['primary_disk_size'] = '30'
-    data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
+        data_engine['primary_disk_size'] = '30'
+        data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
 
-    data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if data_engine['shared_image_enabled'] == 'false':
-        data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
-            os.environ['application'])
-        data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
-            os.environ['application'])
-    else:
-        data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
-        data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
-    data_engine['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
-    else data_engine['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
-    print('Searching pre-configured images')
-    data_engine['primary_image_name'] = GCPMeta().get_image_by_name(data_engine['notebook_primary_image_name'])
-    if data_engine['primary_image_name'] == '':
-        data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    else:
-        print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
-        data_engine['primary_image_name'] = 'global/images/{}'.format(
-            data_engine['primary_image_name'].get('name'))
+        data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
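+        # When shared images are disabled, image names are scoped per project; otherwise they are shared per endpoint.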
+        if data_engine['shared_image_enabled'] == 'false':
+            data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+                os.environ['application'])
+            data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+                os.environ['application'])
+        else:
+            data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+            data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+        data_engine['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
+        else data_engine['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
+        print('Searching pre-configured images')
+        data_engine['primary_image_name'] = GCPMeta.get_image_by_name(data_engine['notebook_primary_image_name'])
+        if data_engine['primary_image_name'] == '':
+            data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        else:
+            print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
+            data_engine['primary_image_name'] = 'global/images/{}'.format(
+                data_engine['primary_image_name'].get('name'))
 
-    data_engine['secondary_image_name'] = GCPMeta().get_image_by_name(data_engine['expected_secondary_image_name'])
-    if data_engine['secondary_image_name'] == '':
-        data_engine['secondary_image_name'] = 'None'
-    else:
-        print('Pre-configured secondary image found. Using: {}'.format(data_engine['secondary_image_name'].get('name')))
-        data_engine['secondary_image_name'] = 'global/images/{}'.format(data_engine['secondary_image_name'].get('name'))
+        data_engine['secondary_image_name'] = GCPMeta.get_image_by_name(data_engine['expected_secondary_image_name'])
+        if data_engine['secondary_image_name'] == '':
+            data_engine['secondary_image_name'] = 'None'
+        else:
+            print('Pre-configured secondary image found. Using: {}'.format(
+                data_engine['secondary_image_name'].get('name')))
+            data_engine['secondary_image_name'] = 'global/images/{}'.format(
+                data_engine['secondary_image_name'].get('name'))
 
-    with open('/root/result.json', 'w') as f:
-        data = {"hostname": data_engine['cluster_name'], "error": ""}
-        json.dump(data, f)
+        with open('/root/result.json', 'w') as f:
+            data = {"hostname": data_engine['cluster_name'], "error": ""}
+            json.dump(data, f)
 
-    data_engine['gpu_accelerator_type'] = 'None'
-    if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
-        data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
-    data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
-                                                     data_engine['project_name'])
-    additional_tags = json.loads(os.environ['tags'].replace("': u'", "\": \"").replace("', u'", "\", \"").replace("{u'", "{\"" ).replace("'}", "\"}"))
+        data_engine['gpu_accelerator_type'] = 'None'
+        if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+            data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+        data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+                                                             data_engine['project_name'], data_engine['endpoint_name'])
+        additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+            "'}", "").lower()
 
-    if '@' in additional_tags['user_tag']:
-        data_engine['user_tag'] = additional_tags['user_tag'][:additional_tags['user_tag'].find('@')]
-    else:
-        data_engine['user_tag'] = additional_tags['user_tag']
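+        # Base labels for the master and slave nodes; sanitized user tags are merged in below.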
+        data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
+                                       "sbn": data_engine['service_base_name'],
+                                       "type": "slave",
+                                       "notebook_name": data_engine['notebook_name'],
+                                       "product": "dlab"}
+        data_engine['master_labels'] = {"name": data_engine['cluster_name'],
+                                        "sbn": data_engine['service_base_name'],
+                                        "type": "master",
+                                        "notebook_name": data_engine['notebook_name'],
+                                        "product": "dlab"}
 
-    data_engine['custom_tag'] = additional_tags['custom_tag']
-    data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
-                                   "sbn": data_engine['service_base_name'],
-                                   "user": data_engine['user_tag'],
-                                   "project_tag": data_engine['project_tag'],
-                                   "endpoint_tag": data_engine['endpoint_tag'],
-                                   "type": "slave",
-                                   "notebook_name": data_engine['notebook_name'],
-                                   "product": "dlab"}
-    data_engine['master_labels'] = {"name": data_engine['cluster_name'],
-                                    "sbn": data_engine['service_base_name'],
-                                    "user": data_engine['user_tag'],
-                                    "project_tag": data_engine['project_tag'],
-                                    "endpoint_tag": data_engine['endpoint_tag'],
-                                    "type": "master",
-                                    "notebook_name": data_engine['notebook_name'],
-                                    "product": "dlab"}
-    if data_engine['custom_tag'] != '':
-        data_engine['slave_labels'].update({'custom_tag': data_engine['custom_tag']})
-        data_engine['master_labels'].update({'custom_tag': data_engine['custom_tag']})
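+        # Strip e-mail domains and empty values before applying each user tag to both label sets.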
+        for tag in additional_tags.split(','):
+            label_key = tag.split(':')[0]
+            label_value = tag.split(':')[1].replace('_', '-')
+            if '@' in label_value:
+                label_value = label_value[:label_value.find('@')]
+            if label_value != '':
+                data_engine['slave_labels'].update({label_key: label_value})
+                data_engine['master_labels'].update({label_key: label_value})
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary. Exception: " + str(err))
+        sys.exit(1)
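# A minimal sketch (not a definitive rendering) of the tag-flattening above,
# assuming os.environ['tags'] arrives as the repr of a Python 2 dict; the sample
# value here is hypothetical. Keys and values are lowercased together, '_' becomes
# '-' in values, and e-mail domains are stripped because GCP labels forbid '@'.
raw = "{u'user_tag': u'alice@example.com', u'custom_tag': u'demo_tag'}"
flat = raw.replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace("'}", "").lower()
labels = {}
for tag in flat.split(','):
    label_key = tag.split(':')[0]
    label_value = tag.split(':')[1].replace('_', '-')
    if '@' in label_value:
        label_value = label_value[:label_value.find('@')]
    if label_value != '':
        labels[label_key] = label_value
print(labels)  # {'user_tag': 'alice', 'custom_tag': 'demo-tag'}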
 
     try:
         logging.info('[CREATE MASTER NODE]')
         print('[CREATE MASTER NODE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
                  "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13}  " \
-                 "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}' --service_base_name {18}". \
+                 "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                 "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+                 "--labels '{17}' --service_base_name {18}". \
             format(data_engine['master_node_name'], data_engine['region'], data_engine['zone'], data_engine['vpc_name'],
                    data_engine['subnet_name'], data_engine['master_size'], data_engine['ssh_key_path'], initial_user,
                    data_engine['dataengine_service_account_name'], data_engine['primary_image_name'],
@@ -191,9 +199,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
         sys.exit(1)
 
     try:
@@ -201,10 +208,11 @@
             logging.info('[CREATE SLAVE NODE {}]'.format(i + 1))
             print('[CREATE SLAVE NODE {}]'.format(i + 1))
             slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
-            params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
-                     "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
-                     "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
-                     "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}' --service_base_name {18}". \
+            params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} " \
+                     "--instance_size {5} --ssh_key_path {6} --initial_user {7} --service_account_name {8} " \
+                     "--image_name {9} --secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+                     "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+                     "--labels '{17}' --service_base_name {18}". \
                 format(slave_name, data_engine['region'], data_engine['zone'],
                        data_engine['vpc_name'], data_engine['subnet_name'], data_engine['slave_size'],
                        data_engine['ssh_key_path'], initial_user, data_engine['dataengine_service_account_name'],
@@ -219,13 +227,12 @@
                 traceback.print_exc()
                 raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         for i in range(data_engine['instance_count'] - 1):
             slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
             try:
-                GCPActions().remove_instance(slave_name, data_engine['zone'])
+                GCPActions.remove_instance(slave_name, data_engine['zone'])
             except:
                 print("The slave instance {} hasn't been created.".format(slave_name))
-        GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
-        append_result("Failed to create slave instances.", str(err))
+        GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+        dlab.fab.append_result("Failed to create slave instances.", str(err))
         sys.exit(1)
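The rollback path above removes each slave on a best-effort basis before removing the master. A minimal sketch of that pattern, with remove_instance standing in for GCPActions.remove_instance and all names hypothetical:

def rollback_cluster(remove_instance, master_name, slave_base_name, instance_count, zone):
    # instance_count includes the master, hence instance_count - 1 slaves
    for i in range(instance_count - 1):
        slave_name = '{}{}'.format(slave_base_name, i + 1)
        try:
            remove_instance(slave_name, zone)
        except Exception:
            print("The slave instance {} hasn't been created.".format(slave_name))
    remove_instance(master_name, zone)

rollback_cluster(lambda name, zone: print('removing {} in {}'.format(name, zone)),
                 'sbn-prj-de-m', 'sbn-prj-de-s', 3, 'us-west1-a')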
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
index 0e40ed9..ce5af48 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
@@ -24,22 +24,24 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
+from fabric.api import *
 
 
 def start_data_engine(zone, cluster_name):
     print("Starting data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().start_instance(i['name'], zone)
+                GCPActions.start_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to start dataengine", str(err))
         sys.exit(1)
 
 
@@ -51,23 +53,27 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     try:
         logging.info('[STARTING DATA ENGINE]')
         print('[STARTING DATA ENGINE]')
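The hunk above swaps the bare try/except around optional os.environ lookups for explicit membership tests; the replace/lower reordering is behavior-neutral, since lower() touches neither '_' nor '-'. A side-by-side sketch of the two equivalent styles:

import os

# old style: rely on the KeyError raised by a missing variable
try:
    computational_name = os.environ['computational_name'].lower().replace('_', '-')
except KeyError:
    computational_name = ''

# new style: explicit membership test
if 'computational_name' in os.environ:
    computational_name = os.environ['computational_name'].replace('_', '-').lower()
else:
    computational_name = ''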
@@ -75,7 +81,7 @@
             start_data_engine(data_engine['zone'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to start Data Engine.", str(err))
+            dlab.fab.append_result("Failed to start Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -84,9 +90,9 @@
         logging.info('[UPDATE LAST ACTIVITY TIME]')
         print('[UPDATE LAST ACTIVITY TIME]')
         data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
-        data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
-        data_engine['notebook_ip'] = GCPMeta().get_private_ip_address(os.environ['notebook_instance_name'])
-        data_engine['computational_ip'] = GCPMeta().get_private_ip_address(data_engine['computational_id'])
+        data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+        data_engine['notebook_ip'] = GCPMeta.get_private_ip_address(os.environ['notebook_instance_name'])
+        data_engine['computational_ip'] = GCPMeta.get_private_ip_address(data_engine['computational_id'])
         data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
             .format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -95,18 +101,17 @@
             local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to update last activity time.", str(err))
+            dlab.fab.append_result("Failed to update last activity time.", str(err))
             raise Exception
     except:
         sys.exit(1)
 
-
     try:
         with open("/root/result.json", 'w') as result:
             res = {"service_base_name": data_engine['service_base_name'],
                    "Action": "Start Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error writing results", str(err))
+        sys.exit(1)
\ No newline at end of file
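Throughout these files the per-call instantiations (GCPMeta().method(...), GCPActions().method(...)) give way to single instances created once under __main__ and shared by every function. A minimal sketch of the shape of that refactor, with stub classes standing in for dlab.meta_lib.GCPMeta and dlab.actions_lib.GCPActions:

class Meta:
    def get_list_instances(self, zone, cluster_name):
        # stand-in for the real GCP lookup
        return {'items': [{'name': cluster_name + '-m'}, {'name': cluster_name + '-s1'}]}

class Actions:
    def start_instance(self, name, zone):
        print('starting {} in {}'.format(name, zone))

GCPMeta = Meta()        # created once, then referenced as a module-level name
GCPActions = Actions()

def start_data_engine(zone, cluster_name):
    instances = GCPMeta.get_list_instances(zone, cluster_name)
    if 'items' in instances:
        for i in instances['items']:
            GCPActions.start_instance(i['name'], zone)

start_data_engine('us-west1-a', 'sbn-prj-ep-de-comp')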
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
index 2396600..e370bfb 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -34,12 +35,12 @@
 def stop_data_engine(zone, cluster_name):
     print("Stopping data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().stop_instance(i['name'], zone)
+                GCPActions.stop_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to stop dataengine", str(err))
         sys.exit(1)
 
 
@@ -51,23 +52,27 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     try:
         logging.info('[STOPPING DATA ENGINE]')
         print('[STOPPING DATA ENGINE]')
@@ -75,7 +80,7 @@
             stop_data_engine(data_engine['zone'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to stop Data Engine.", str(err))
+            dlab.fab.append_result("Failed to stop Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -85,6 +90,6 @@
                    "Action": "Stop Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error writing results", str(err))
+        sys.exit(1)
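The cluster name now interpolates the endpoint and drops the exploratory name from the suffix; a worked example of the new format with hypothetical values:

service_base_name = 'dlab-test'
project_name = 'My_Project'.replace('_', '-').lower()        # 'my-project'
endpoint_name = 'Local_Endpoint'.replace('_', '-').lower()   # 'local-endpoint'
computational_name = 'spark1'
cluster_name = "{}-{}-{}-de-{}".format(service_base_name, project_name,
                                       endpoint_name, computational_name)
print(cluster_name)  # dlab-test-my-project-local-endpoint-de-spark1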
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
index f50ffb2..6d9adfd 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
@@ -24,9 +24,10 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
 import os
 import uuid
 
@@ -34,19 +35,19 @@
 def terminate_data_engine(zone, notebook_name, os_user, key_path, cluster_name):
     print("Terminating data engine cluster")
     try:
-        instances = GCPMeta().get_list_instances(zone, cluster_name)
+        instances = GCPMeta.get_list_instances(zone, cluster_name)
         if 'items' in instances:
             for i in instances['items']:
-                GCPActions().remove_instance(i['name'], zone)
+                GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine", str(err))
         sys.exit(1)
 
     print("Removing Data Engine kernels from notebook")
     try:
-        remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
+        dlab.actions_lib.remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
         sys.exit(1)
 
 
@@ -58,25 +59,29 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     data_engine = dict()
-    try:
-        data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
-    except:
+    if 'exploratory_name' in os.environ:
+        data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+    else:
         data_engine['exploratory_name'] = ''
-    try:
-        data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
-    except:
+    if 'computational_name' in os.environ:
+        data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+    else:
         data_engine['computational_name'] = ''
     data_engine['service_base_name'] = os.environ['conf_service_base_name']
     data_engine['zone'] = os.environ['gcp_zone']
-    data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
-    data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
-    data_engine['cluster_name'] = \
-        data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
-        data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+    data_engine['user_name'] = os.environ['edge_user_name']
+    data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+    data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+    data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+                                                          data_engine['project_name'],
+                                                          data_engine['endpoint_name'],
+                                                          data_engine['computational_name'])
     data_engine['notebook_name'] = os.environ['notebook_instance_name']
-    data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+    data_engine['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
 
 
     try:
@@ -87,7 +92,7 @@
                                   data_engine['key_path'], data_engine['cluster_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate Data Engine.", str(err))
+            dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
             raise Exception
     except:
         sys.exit(1)
@@ -98,6 +103,6 @@
                    "Action": "Terminate Data Engine"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error writing results", str(err))
+        sys.exit(1)
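All three dataengine scripts converge on the same hardened result-writing tail: failures are now reported through append_result and exit non-zero instead of exiting 0 silently. A sketch of that pattern, with append_result passed in as a stand-in for dlab.fab.append_result:

import json
import sys

def write_result(path, res, append_result):
    try:
        with open(path, 'w') as result:
            print(json.dumps(res))
            result.write(json.dumps(res))
    except Exception as err:
        append_result("Error writing results", str(err))
        sys.exit(1)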
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
index 4f18e12..f9762ac 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -37,67 +39,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -105,9 +116,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -124,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -134,16 +143,16 @@
         logging.info('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
         print('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
         params = "--hostname {} --keyfile {} --user {} --region {}". \
-            format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'], os.environ['gcp_region'])
+            format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'],
+                   os.environ['gcp_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -165,27 +174,26 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure Deep Learning node.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
         print('[INSTALLING USERs KEY]')
         logging.info('[INSTALLING USERs KEY]')
-        additional_config = {"user_keyname": os.environ['project_name'],
+        additional_config = {"user_keyname": notebook_config['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -197,35 +205,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed to setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creation operation for your template has been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -250,59 +257,64 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(notebook_config['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
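The summary block builds both direct (notebook-IP) and reverse-proxy (Edge hostname) URLs from the exploratory name; a minimal illustration with hypothetical addresses:

ip_address = '10.0.1.5'
edge_instance_hostname = '35.190.0.10'
exploratory_name = 'dl1'

tensorboard_url = "http://" + ip_address + ":6006/"
jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(exploratory_name)
jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(exploratory_name)
tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(exploratory_name)
print(tensorboard_access_url)  # https://35.190.0.10/dl1-tensor/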
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
index 39d00fe..110efb9 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
@@ -22,10 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
+
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,89 +42,124 @@
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
-    print('Generating infrastructure names and tags')
-    edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['key_name'] = os.environ['conf_key_name']
-    edge_conf['user_keyname'] = os.environ['project_name']
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    def clear_resources():
+        GCPActions.remove_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
+        GCPActions.remove_bucket(edge_conf['bucket_name'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(edge_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(edge_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
+        GCPActions.remove_role(edge_conf['ps_role_name'])
+        GCPActions.remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
+        GCPActions.remove_role(edge_conf['edge_role_name'])
+        GCPActions.remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        edge_conf = dict()
+        edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        edge_conf['key_name'] = os.environ['conf_key_name']
+        edge_conf['user_keyname'] = edge_conf['project_name']
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-vpc'
+        edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        edge_conf['subnet_name'] = '{0}-{1}-{2}-subnet'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name'])
+        edge_conf['region'] = os.environ['gcp_region']
+        edge_conf['zone'] = os.environ['gcp_zone']
+        edge_conf['vpc_selflink'] = GCPMeta.get_vpc(edge_conf['vpc_name'])['selfLink']
+        edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        edge_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(edge_conf['service_base_name'],
+                                                                           edge_conf['project_name'],
+                                                                           edge_conf['endpoint_name'])
+        edge_conf['edge_unique_index'] = GCPMeta.get_index_by_service_account_name(
+            edge_conf['edge_service_account_name'])
+        edge_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(edge_conf['service_base_name'],
+                                                                  edge_conf['project_name'],
+                                                                  edge_conf['edge_unique_index'])
+        edge_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(edge_conf['service_base_name'],
+                                                                       edge_conf['project_name'],
+                                                                       edge_conf['endpoint_name'])
+        edge_conf['ps_unique_index'] = GCPMeta.get_index_by_service_account_name(edge_conf['ps_service_account_name'])
+        edge_conf['ps_role_name'] = '{}-{}-{}-ps-role'.format(edge_conf['service_base_name'],
+                                                              edge_conf['project_name'], edge_conf['ps_unique_index'])
+        edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'], edge_conf['endpoint_name'])
+        edge_conf['firewall_name'] = '{}-sg'.format(edge_conf['instance_name'])
+        edge_conf['notebook_firewall_name'] = '{0}-{1}-{2}-nb-sg'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['project_name'],
+                                                                         edge_conf['endpoint_name'])
+        edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+                                                               edge_conf['project_name'],
+                                                               edge_conf['endpoint_name'])
+        edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+                                                                         edge_conf['endpoint_name'])
+        edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+        edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+                                                                          edge_conf['project_name'],
+                                                                          edge_conf['endpoint_name'])
+        edge_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
+        edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+        edge_conf['private_subnet_cidr'] = GCPMeta.get_subnet(edge_conf['subnet_name'],
+                                                              edge_conf['region'])['ipCidrRange']
+        edge_conf['static_ip'] = \
+            GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+        edge_conf['private_ip'] = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
+        edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
+        edge_conf['fw_common_name'] = '{}-{}-{}-ps-sg'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+                                                              edge_conf['endpoint_name'])
+        edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
+        edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
+        edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
+
+        if os.environ['conf_stepcerts_enabled'] == 'true':
+            edge_conf['step_cert_sans'] = ' --san {0} --san {1} --san {2}'.format(edge_conf['static_ip'],
+                                                                                  edge_conf['instance_hostname'],
+                                                                                  edge_conf['private_ip'])
         else:
-            edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-ssn-vpc'
-    edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    edge_conf['subnet_name'] = '{0}-{1}-subnet'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    edge_conf['region'] = os.environ['gcp_region']
-    edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['vpc_selflink'] = GCPMeta().get_vpc(edge_conf['vpc_name'])['selfLink']
-    edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
-    edge_conf['edge_service_account_name'] = '{}-{}-edge'.format(edge_conf['service_base_name'],
-                                                                 edge_conf['project_name'])
-    edge_conf['edge_unique_index'] = GCPMeta().get_index_by_service_account_name(edge_conf['edge_service_account_name'])
-    edge_conf['edge_role_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'],
-                                                      edge_conf['project_name'], edge_conf['edge_unique_index'])
-    edge_conf['ps_service_account_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'],
-                                                             edge_conf['project_name'])
-    edge_conf['ps_unique_index'] = GCPMeta().get_index_by_service_account_name(edge_conf['ps_service_account_name'])
-    edge_conf['ps_role_name'] = '{}-{}-{}-ps'.format(edge_conf['service_base_name'],
-                                                  edge_conf['project_name'], edge_conf['ps_unique_index'])
-    edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'], edge_conf['endpoint_name'])
-    edge_conf['firewall_name'] = edge_conf['instance_name'] + '{}-firewall'.format(edge_conf['instance_name'])
-    edge_conf['notebook_firewall_name'] = '{0}-{1}-{2}-nb-firewall'.format(edge_conf['service_base_name'],
-                                                                       edge_conf['project_name'], os.environ['endpoint_name'])
-    edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
-                                                           edge_conf['project_name'],
-                                                           edge_conf['endpoint_name'])
-    edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
-                                                                     edge_conf['endpoint_name'])
-    edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
-    edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
-    edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
-    edge_conf['private_subnet_cidr'] = GCPMeta().get_subnet(edge_conf['subnet_name'],
-                                                            edge_conf['region'])['ipCidrRange']
-    edge_conf['static_ip'] = \
-        GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
-    edge_conf['private_ip'] = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
-    edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
-    edge_conf['fw_common_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'], edge_conf['project_name'])
-    edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
-    edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
-    edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
+            edge_conf['step_cert_sans'] = ''
 
-    if os.environ['conf_stepcerts_enabled'] == 'true':
-        step_cert_sans = ' --san {0} --san {1} --san {2}'.format(edge_conf['static_ip'], instance_hostname,
-                                                                 edge_conf['private_ip'])
-    else:
-        step_cert_sans = ''
-
-    edge_conf['allowed_ip_cidr'] = list()
-    for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
-        edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+        edge_conf['allowed_ip_cidr'] = list()
+        for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+            edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        clear_resources()
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            edge_conf['initial_user'] = 'ubuntu'
+            edge_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            edge_conf['initial_user'] = 'ec2-user'
+            edge_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, "/root/keys/" + os.environ['conf_key_name'] + ".pem", initial_user,
-             edge_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+                 edge_conf['instance_hostname'], "/root/keys/" + os.environ['conf_key_name'] + ".pem",
+                 edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -125,53 +167,24 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING PREREQUISITES]')
         logging.info('[INSTALLING PREREQUISITES]')
-        params = "--hostname {} --keyfile {} --user {} --region {}".\
-            format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'], os.environ['gcp_region'])
+        params = "--hostname {} --keyfile {} --user {} --region {}".format(
+                  edge_conf['instance_hostname'], edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'],
+                  os.environ['gcp_region'])
         try:
             local("~/scripts/{}.py {}".format('install_prerequisites', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -179,7 +192,7 @@
         logging.info('[INSTALLING HTTP PROXY]')
         additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
                              "template_file": "/root/templates/squid.conf",
-                             "project_name": os.environ['project_name'],
+                             "project_name": edge_conf['project_name'],
                              "ldap_host": os.environ['ldap_hostname'],
                              "ldap_dn": os.environ['ldap_dn'],
                              "ldap_user": os.environ['ldap_service_username'],
@@ -187,7 +200,7 @@
                              "vpc_cidrs": edge_conf['vpc_cidrs'],
                              "allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
-                 .format(instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config),
+                 .format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
                          edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('configure_http_proxy', params))
@@ -195,23 +208,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing http proxy.", str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing http proxy.", str(err))
+        clear_resources()
         sys.exit(1)
 
 
@@ -221,41 +219,29 @@
         additional_config = {"user_keyname": edge_conf['user_keyname'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+            edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
+            edge_conf['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key. Excpeption: " + str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing users key. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[INSTALLING NGINX REVERSE PROXY]')
         logging.info('[INSTALLING NGINX REVERSE PROXY]')
-        keycloak_client_secret = str(uuid.uuid4())
+        edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
         params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
-                 "--step_cert_sans '{}'" \
-            .format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'],
-                    edge_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret,
-                    step_cert_sans)
+                 "--step_cert_sans '{}'".format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'],
+                                                edge_conf['dlab_ssh_user'], '{}-{}-{}'.format(
+                                                                             edge_conf['service_base_name'],
+                                                                             edge_conf['project_name'],
+                                                                             edge_conf['endpoint_name']),
+                                                edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
@@ -268,37 +254,23 @@
             .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
                     os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
                     os.environ['keycloak_user_password'],
-                    keycloak_client_secret, instance_hostname, os.environ['project_name'], os.environ['endpoint_name'])
+                    edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+                    edge_conf['endpoint_name'])
         try:
             local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
-        GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
-        GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
-        GCPActions().remove_bucket(edge_conf['bucket_name'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['ps_role_name'])
-        GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
-        GCPActions().remove_role(edge_conf['edge_role_name'])
-        GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
-        print("Hostname: {}".format(instance_hostname))
+        print("Hostname: {}".format(edge_conf['instance_hostname']))
         print("Public IP: {}".format(edge_conf['static_ip']))
         print("Private IP: {}".format(edge_conf['private_ip']))
         print("Key name: {}".format(edge_conf['key_name']))
@@ -306,7 +278,7 @@
         print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
         print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
         with open("/root/result.json", 'w') as result:
-            res = {"hostname": instance_hostname,
+            res = {"hostname": edge_conf['instance_hostname'],
                    "public_ip": edge_conf['static_ip'],
                    "ip": edge_conf['private_ip'],
                    "instance_id": edge_conf['instance_name'],
@@ -317,11 +289,12 @@
                    "socks_port": "1080",
                    "notebook_subnet": edge_conf['private_subnet_cidr'],
                    "full_edge_conf": edge_conf,
-                   "project_name": os.environ['project_name'],
+                   "project_name": edge_conf['project_name'],
                    "@class": "com.epam.dlab.dto.gcp.edge.EdgeInfoGcp",
                    "Action": "Create new EDGE server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
+        sys.exit(1)
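
Editor's note: every handler in the hunks above now delegates cleanup to clear_resources(). Its definition lies outside these hunks, so the sketch below is only a reconstruction from the removed per-handler cleanup blocks; the edge_conf keys are taken from this patch, and the module-level GCPActions instance is assumed to match the other refactored scripts in this diff.

# Reconstructed sketch of clear_resources() -- not shown verbatim in this diff.
def clear_resources():
    GCPActions.remove_instance(edge_conf['instance_name'], edge_conf['zone'])
    GCPActions.remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
    GCPActions.remove_bucket(edge_conf['bucket_name'])
    # The seven firewall rules created for the EDGE and private-subnet (ps) sides.
    for firewall in (edge_conf['fw_edge_ingress_public'], edge_conf['fw_edge_ingress_internal'],
                     edge_conf['fw_edge_egress_public'], edge_conf['fw_edge_egress_internal'],
                     edge_conf['fw_ps_ingress'], edge_conf['fw_ps_egress_private'],
                     edge_conf['fw_ps_egress_public']):
        GCPActions.remove_firewall(firewall)
    GCPActions.remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
    GCPActions.remove_role(edge_conf['ps_role_name'])
    GCPActions.remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
    GCPActions.remove_role(edge_conf['edge_role_name'])
    GCPActions.remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
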
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
index 06085dd..52da1be 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,31 +39,34 @@
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
     edge_conf['region'] = os.environ['gcp_region']
     edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
+    edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+                                                                      edge_conf['project_name'],
+                                                                      edge_conf['endpoint_name'])
 
     logging.info('[START EDGE]')
     print('[START EDGE]')
     try:
-        GCPActions().start_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.start_instance(edge_conf['instance_name'], edge_conf['zone'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start edge.", str(err))
+        dlab.fab.append_result("Failed to start edge.", str(err))
         sys.exit(1)
 
     try:
-        instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
+        instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
         public_ip_address = \
-            GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
-        ip_address = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
+            GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+        ip_address = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
         print('[SUMMARY]')
         logging.info('[SUMMARY]')
         print("Instance name: {}".format(edge_conf['instance_name']))
@@ -74,7 +81,7 @@
                    "Action": "Start up notebook server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
         sys.exit(1)
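
Editor's note: this hunk also shows the construction pattern the patch applies across edge_start.py, edge_stop.py, and edge_terminate.py: the GCP helper objects are instantiated once per run and reused, instead of building a throwaway GCPMeta()/GCPActions() for every call. A minimal sketch of the before/after, using only calls that appear in this diff:

# Before: a new helper object (and underlying API client) per call:
#     hostname = dlab.meta_lib.GCPMeta().get_instance_public_ip_by_name(name)
# After: construct once at startup, reuse for every lookup and action.
GCPMeta = dlab.meta_lib.GCPMeta()
GCPActions = dlab.actions_lib.GCPActions()

GCPActions.start_instance(edge_conf['instance_name'], edge_conf['zone'])
hostname = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
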
 
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
index 5342d8a..ee15222 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
@@ -21,9 +21,13 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
 import sys
+import json
 
 
 if __name__ == "__main__":
@@ -35,21 +39,22 @@
                         filename=local_log_filepath)
 
     print('Generating infrastructure names and tags')
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
     edge_conf['zone'] = os.environ['gcp_zone']
-    edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
     edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
                                                            edge_conf['project_name'], edge_conf['endpoint_name'])
 
     logging.info('[STOP EDGE]')
     print('[STOP EDGE]')
     try:
-        GCPActions().stop_instance(edge_conf['instance_name'], edge_conf['zone'])
+        GCPActions.stop_instance(edge_conf['instance_name'], edge_conf['zone'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to stop edge.", str(err))
+        dlab.fab.append_result("Failed to stop edge.", str(err))
         sys.exit(1)
 
     try:
@@ -58,7 +63,6 @@
                    "Action": "Stop edge server"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
-
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
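
Editor's note: edge_stop.py (and edge_start.py above) now also treats a failure to write /root/result.json as fatal: sys.exit(1) plus an append_result() record, where the old code printed a message and exited 0. A sketch of the resulting pattern, with illustrative dictionary keys:

try:
    with open("/root/result.json", 'w') as result:
        res = {"instance_name": edge_conf['instance_name'],   # illustrative keys
               "Action": "Stop edge server"}
        print(json.dumps(res))
        result.write(json.dumps(res))
except Exception as err:
    dlab.fab.append_result("Failed to write results.", str(err))
    sys.exit(1)   # was sys.exit(0), which reported success despite the failure
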
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
index 6385ffd..8080988 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
@@ -22,118 +22,127 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
 
 
-def terminate_edge_node(user_name, service_base_name, region, zone):
+def terminate_edge_node(user_name, service_base_name, region, zone, project_name, endpoint_name):
     print("Terminating Dataengine-service clusters")
     try:
         labels = [
             {'sbn': service_base_name},
             {'user': user_name}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataproc", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
-    base = '{}-{}'.format(service_base_name, user_name)
-    keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+    base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
+    keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
     targets = ['{}-{}'.format(base, k) for k in keys]
     try:
-        instances = GCPMeta().get_list_instances(zone, base)
+        instances = GCPMeta.get_list_instances(zone, base)
         if 'items' in instances:
             for i in instances['items']:
                 if 'user' in i['labels'] and user_name == i['labels']['user']:
-                    GCPActions().remove_instance(i['name'], zone)
+                    GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing static addresses")
     try:
-        static_addresses = GCPMeta().get_list_static_addresses(region, base)
+        static_addresses = GCPMeta.get_list_static_addresses(region, base)
         if 'items' in static_addresses:
             for i in static_addresses['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_static_address(i['name'], region)
+                    GCPActions.remove_static_address(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static IPs", str(err))
         sys.exit(1)
 
     print("Removing storage bucket")
     try:
-        buckets = GCPMeta().get_list_buckets(base)
+        buckets = GCPMeta.get_list_buckets(base)
         if 'items' in buckets:
             for i in buckets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_bucket(i['name'])
+                    GCPActions.remove_bucket(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove buckets", str(err))
         sys.exit(1)
 
     print("Removing firewalls")
     try:
-        firewalls = GCPMeta().get_list_firewalls(base)
+        firewalls = GCPMeta.get_list_firewalls(base)
         if 'items' in firewalls:
             for i in firewalls['items']:
                 if bool(set(targets) & set(i['targetTags'])):
-                    GCPActions().remove_firewall(i['name'])
+                    GCPActions.remove_firewall(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing Service accounts and roles")
     try:
-        list_service_accounts = GCPMeta().get_list_service_accounts()
+        list_service_accounts = GCPMeta.get_list_service_accounts()
         for service_account in (set(targets) & set(list_service_accounts)):
             if service_account.startswith(service_base_name):
-                GCPActions().remove_service_account(service_account, service_base_name)
-        list_roles_names = GCPMeta().get_list_roles()
+                GCPActions.remove_service_account(service_account, service_base_name)
+        list_roles_names = GCPMeta.get_list_roles()
         for role in (set(targets) & set(list_roles_names)):
             if role.startswith(service_base_name):
-                GCPActions().remove_role(role)
+                GCPActions.remove_role(role)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
         sys.exit(1)
 
     print("Removing subnets")
     try:
-        list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+        list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
         if 'items' in list_subnets:
             vpc_selflink = list_subnets['items'][0]['network']
             vpc_name = vpc_selflink.split('/')[-1]
-            subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+            subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
             for i in subnets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_subnet(i['name'], region)
+                    GCPActions.remove_subnet(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets", str(err))
         sys.exit(1)
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/edge/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     edge_conf = dict()
-    edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    edge_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
+    edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    edge_conf['edge_user_name'] = (os.environ['edge_user_name'])
+    edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
     edge_conf['region'] = os.environ['gcp_region']
     edge_conf['zone'] = os.environ['gcp_zone']
 
@@ -142,12 +151,13 @@
         print('[TERMINATE EDGE]')
         try:
             terminate_edge_node(edge_conf['edge_user_name'], edge_conf['service_base_name'],
-                                edge_conf['region'], edge_conf['zone'])
+                                edge_conf['region'], edge_conf['zone'], edge_conf['project_name'],
+                                edge_conf['endpoint_name'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
-    except Exception as err:
-        print('Error: {0}'.format(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
+            raise Exception
+    except:
         sys.exit(1)
 
     try:
@@ -157,6 +167,6 @@
                    "Action": "Terminate edge node"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
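
Editor's note: terminate_edge_node() now scopes resource matching by project and endpoint instead of by user, and 'ip' becomes 'static-ip' in the key list. A worked example of the new matching, using hypothetical values:

# Hypothetical inputs; the format strings are the ones from the hunk above.
service_base_name, project_name, endpoint_name = 'dlab', 'proj1', 'ep1'
base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)  # 'dlab-proj1-ep1'
keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
targets = ['{}-{}'.format(base, k) for k in keys]
# targets == ['dlab-proj1-ep1-edge', 'dlab-proj1-ep1-ps', 'dlab-proj1-ep1-static-ip',
#             'dlab-proj1-ep1-bucket', 'dlab-proj1-ep1-subnet']
# Each removal loop keeps only resources whose name (or firewall targetTags)
# intersects this list: bool(set(targets) & set([name])).
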
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
index 79794b7..b8744e8 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -37,63 +39,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-                                                        notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-                                                        notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -101,9 +116,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -120,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -138,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -164,9 +176,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -175,16 +186,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -196,35 +207,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -249,51 +259,55 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_access_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_access_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
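
Editor's note: the image-name branches in this file reduce to one rule: names are project-scoped when conf_shared_image_enabled is 'false' and endpoint-scoped otherwise. The helper below is illustrative only (it does not exist in the patch) and mirrors the format strings shown above:

# Hypothetical helper summarizing the image-naming rule in jupyter_configure.py.
def expected_image_names(service_base_name, project_name, endpoint_name,
                         application, shared_image_enabled):
    if shared_image_enabled == 'false':
        prefix = '{}-{}-{}-{}'.format(service_base_name, project_name,
                                      endpoint_name, application)
    else:
        prefix = '{}-{}-{}'.format(service_base_name, endpoint_name, application)
    return ('{}-primary-image'.format(prefix),
            '{}-secondary-image'.format(prefix))

# expected_image_names('dlab', 'proj1', 'ep1', 'jupyter', 'false')
# -> ('dlab-proj1-ep1-jupyter-primary-image', 'dlab-proj1-ep1-jupyter-secondary-image')
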
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
index b41b024..fad785b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
@@ -1,26 +1,5 @@
 #!/usr/bin/python
 
-#  *****************************************************************************
-#  #
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#  #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#  #
-#  Unless required by applicable law or agreed to in writing,
-#  software distributed under the License is distributed on an
-#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#  KIND, either express or implied.  See the License for the
-#  specific language governing permissions and limitations
-#  under the License.
-#  #
-#  ******************************************************************************
-
 # *****************************************************************************
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -45,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -58,65 +39,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -124,9 +116,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -143,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -161,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring jupiter and all dependencies
@@ -186,9 +175,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure jupyter.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure jupyter.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -197,16 +185,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -218,35 +206,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['shared_image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -271,12 +258,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -294,9 +280,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy for docker.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
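
The hunk below wraps JupyterLab container start-up and the summary output in try/except blocks. For orientation: the configure scripts hand results back to the provisioning backend as plain JSON written to /root/result.json. A minimal sketch of that contract; field names are taken from the hunk below, values are hypothetical:

    import json

    res = {"hostname": "10.0.0.12",              # private IP of the notebook
           "ip": "10.0.0.12",
           "instance_id": "sbn-prj-ep-nb-demo",  # hypothetical instance name
           "master_keyname": "key-name",
           "notebook_name": "sbn-prj-ep-nb-demo",
           "Action": "Create new notebook server",
           "exploratory_url": [
               {"description": "JupyterLab", "url": "http://edge-host/demo/"},
               {"description": "Ungit", "url": "http://edge-host/demo-ungit/"}]}
    with open("/root/result.json", 'w') as result:  # scripts run as root
        result.write(json.dumps(res))
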
 
 
@@ -312,51 +297,55 @@
         try:
            local("~/scripts/jupyterlab_container_start.py {}".format(params))
         except:
-             traceback.print_exc()
-             raise Exception
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start Jupyter container.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("JupyterLab URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
-    print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("JupyterLab URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
+        print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "JupyterLab",
-                    "url": jupyter_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_acces_url},
-                   #{"description": "JupyterLab (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "JupyterLab",
+                        "url": jupyter_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_acces_url},
+                       #{"description": "JupyterLab (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
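
The recurring refactor in this file, and in project_prepare.py below, replaces the wildcard imports (from dlab.fab import *) with module-qualified ones and creates a single GCPMeta/GCPActions instance up front instead of constructing a new client for every call. A minimal sketch, assuming the dlab helper modules are importable; note that the instance names deliberately shadow the class names, matching the patch:

    import dlab.fab
    import dlab.meta_lib
    import dlab.actions_lib

    # One shared client pair per script run; names shadow the classes on purpose.
    GCPMeta = dlab.meta_lib.GCPMeta()
    GCPActions = dlab.actions_lib.GCPActions()

    # Subsequent calls reuse the instances instead of re-instantiating:
    # ip = GCPMeta.get_private_ip_address(instance_name)
    # GCPActions.remove_instance(instance_name, zone)
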
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
index 82edc16..47b6cde 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
@@ -22,11 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
 import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
+
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,92 +41,111 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    print('Generating infrastructure names and tags')
-    project_conf = dict()
-    project_conf['edge_unique_index'] = str(uuid.uuid4())[:5]
-    project_conf['ps_unique_index'] = str(uuid.uuid4())[:5]
-    project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    project_conf['key_name'] = os.environ['conf_key_name']
-    project_conf['user_keyname'] = os.environ['project_name']
-    project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
     try:
-        if os.environ['gcp_vpc_name'] == '':
-            raise KeyError
-        else:
-            project_conf['vpc_name'] = os.environ['gcp_vpc_name']
-    except KeyError:
-        project_conf['vpc_name'] = project_conf['service_base_name'] + '-ssn-vpc'
-    project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
-                                                               project_conf['project_name'])
-    project_conf['subnet_name'] = os.environ['gcp_subnet_name']
-    project_conf['region'] = os.environ['gcp_region']
-    project_conf['zone'] = os.environ['gcp_zone']
-    project_conf['vpc_selflink'] = GCPMeta().get_vpc(project_conf['vpc_name'])['selfLink']
-    project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
-    project_conf['edge_service_account_name'] = '{}-{}-edge'.format(project_conf['service_base_name'],
-                                                                 project_conf['project_name'])
-    project_conf['edge_role_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
-                                                      project_conf['project_name'], project_conf['edge_unique_index'])
-    project_conf['ps_service_account_name'] = '{}-{}-ps'.format(project_conf['service_base_name'],
-                                                             project_conf['project_name'])
-    project_conf['ps_role_name'] = '{}-{}-{}-ps'.format(project_conf['service_base_name'],
-                                                  project_conf['project_name'], project_conf['ps_unique_index'])
-    project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
-    project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
-    project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
-                                                          project_conf['project_name'], project_conf['endpoint_tag'])
-    project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
-    project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        print('Generating infrastructure names and tags')
+        project_conf = dict()
+        project_conf['edge_unique_index'] = str(uuid.uuid4())[:5]
+        project_conf['ps_unique_index'] = str(uuid.uuid4())[:5]
+        project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+        project_conf['key_name'] = os.environ['conf_key_name']
+        project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        project_conf['user_keyname'] = project_conf['project_name']
+        project_conf['project_tag'] = (project_conf['project_name'])
+        project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        project_conf['endpoint_tag'] = project_conf['endpoint_name']
+        try:
+            if os.environ['gcp_vpc_name'] == '':
+                raise KeyError
+            else:
+                project_conf['vpc_name'] = os.environ['gcp_vpc_name']
+        except KeyError:
+            project_conf['vpc_name'] = project_conf['service_base_name'] + '-vpc'
+        project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['subnet_name'] = os.environ['gcp_subnet_name']
+        project_conf['region'] = os.environ['gcp_region']
+        project_conf['zone'] = os.environ['gcp_zone']
+        project_conf['vpc_selflink'] = GCPMeta.get_vpc(project_conf['vpc_name'])['selfLink']
+        project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+        project_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(project_conf['service_base_name'],
+                                                                              project_conf['project_name'],
+                                                                              project_conf['endpoint_name'])
+        project_conf['edge_role_name'] = '{}-{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+                                                                        project_conf['project_name'],
+                                                                        project_conf['endpoint_name'],
+                                                                        project_conf['edge_unique_index'])
+        project_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(project_conf['service_base_name'],
+                                                                          project_conf['project_name'],
+                                                                          project_conf['endpoint_name'])
+        project_conf['ps_role_name'] = '{}-{}-{}-{}-ps-role'.format(project_conf['service_base_name'],
+                                                                    project_conf['project_name'],
+                                                                    project_conf['endpoint_name'],
+                                                                    project_conf['ps_unique_index'])
+        project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
+        project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
+        project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
+                                                                  project_conf['project_name'],
+                                                                  project_conf['endpoint_name'])
+        project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
+        project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+                                                                  project_conf['project_name'],
+                                                                  project_conf['endpoint_name'])
+        project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
+                                                                            project_conf['endpoint_name'])
+        project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+        project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        project_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(project_conf['service_base_name'],
+                                                                             project_conf['project_name'],
+                                                                             project_conf['endpoint_name'])
+        project_conf['fw_edge_ingress_public'] = '{}-sg-ingress-public'.format(project_conf['instance_name'])
+        project_conf['fw_edge_ingress_internal'] = '{}-sg-ingress-internal'.format(project_conf['instance_name'])
+        project_conf['fw_edge_egress_public'] = '{}-sg-egress-public'.format(project_conf['instance_name'])
+        project_conf['fw_edge_egress_internal'] = '{}-sg-egress-internal'.format(project_conf['instance_name'])
+        project_conf['ps_firewall_target'] = '{0}-{1}-{2}-ps'.format(project_conf['service_base_name'],
+                                                                     project_conf['project_name'],
+                                                                     project_conf['endpoint_name'])
+        project_conf['fw_common_name'] = '{}-{}-{}-ps'.format(project_conf['service_base_name'],
                                                               project_conf['project_name'],
                                                               project_conf['endpoint_name'])
-    project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
-                                                                        project_conf['endpoint_name'])
-    project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
-    project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    project_conf['static_address_name'] = '{0}-{1}-ip'.format(project_conf['service_base_name'], project_conf['project_name'])
-    project_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(project_conf['instance_name'])
-    project_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(project_conf['instance_name'])
-    project_conf['fw_edge_egress_public'] = '{}-egress-public'.format(project_conf['instance_name'])
-    project_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(project_conf['instance_name'])
-    project_conf['ps_firewall_target'] = '{0}-{1}-ps'.format(project_conf['service_base_name'],
-                                                          project_conf['project_name'])
-    project_conf['fw_common_name'] = '{}-{}-ps'.format(project_conf['service_base_name'], project_conf['project_name'])
-    project_conf['fw_ps_ingress'] = '{}-ingress'.format(project_conf['fw_common_name'])
-    project_conf['fw_ps_egress_private'] = '{}-egress-private'.format(project_conf['fw_common_name'])
-    project_conf['fw_ps_egress_public'] = '{}-egress-public'.format(project_conf['fw_common_name'])
-    project_conf['network_tag'] = project_conf['instance_name']
-    project_conf['instance_labels'] = {"name": project_conf['instance_name'],
-                                    "sbn": project_conf['service_base_name'],
-                                    "project_tag": project_conf['project_tag'],
-                                    "endpoint_tag": project_conf['endpoint_tag'],
-                                    "product": "dlab"}
-    project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
-    project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
-    if 'conf_user_subnets_range' in os.environ:
-        project_conf['user_subnets_range'] = os.environ['conf_user_subnets_range']
-    else:
-        project_conf['user_subnets_range'] = ''
+        project_conf['fw_ps_ingress'] = '{}-sg-ingress'.format(project_conf['fw_common_name'])
+        project_conf['fw_ps_egress_private'] = '{}-sg-egress-private'.format(project_conf['fw_common_name'])
+        project_conf['fw_ps_egress_public'] = '{}-sg-egress-public'.format(project_conf['fw_common_name'])
+        project_conf['network_tag'] = project_conf['instance_name']
+        project_conf['instance_labels'] = {"name": project_conf['instance_name'],
+                                           "sbn": project_conf['service_base_name'],
+                                           "project_tag": project_conf['project_tag'],
+                                           "endpoint_tag": project_conf['endpoint_tag'],
+                                           "product": "dlab"}
+        project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
+        project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+        if 'conf_user_subnets_range' in os.environ:
+            project_conf['user_subnets_range'] = os.environ['conf_user_subnets_range']
+        else:
+            project_conf['user_subnets_range'] = ''
 
-    # FUSE in case of absence of user's key
-    try:
-        project_conf['user_key'] = os.environ['key']
+        # FUSE in case of absence of user's key
         try:
-            local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'], os.environ['project_name']))
-        except:
-            print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
-    except KeyError:
-        print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
-        sys.exit(1)
+            project_conf['user_key'] = os.environ['key']
+            try:
+                local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+                                                        project_conf['project_name']))
+            except:
+                print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
+        except KeyError:
+            print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+            sys.exit(1)
 
-    print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(
-        project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
-    logging.info(json.dumps(project_conf))
+        print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(
+            project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+        logging.info(json.dumps(project_conf))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+        sys.exit(1)
 
     try:
         logging.info('[CREATE SUBNET]')
@@ -131,18 +156,17 @@
                          project_conf['user_subnets_range'])
         try:
             local("~/scripts/{}.py {}".format('common_create_subnet', params))
-            project_conf['private_subnet_cidr'] = GCPMeta().get_subnet(project_conf['private_subnet_name'],
-                                                                    project_conf['region'])['ipCidrRange']
+            project_conf['private_subnet_cidr'] = GCPMeta.get_subnet(project_conf['private_subnet_name'],
+                                                                     project_conf['region'])['ipCidrRange']
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+            GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         except:
             print("Subnet hasn't been created.")
-        append_result("Failed to create subnet.", str(err))
+        dlab.fab.append_result("Failed to create subnet.", str(err))
         sys.exit(1)
 
     print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
@@ -150,8 +174,9 @@
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
         print('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
-        params = "--service_account_name {} --role_name {} --unique_index {} --service_base_name {}".format(project_conf['edge_service_account_name'],
-                                                                   project_conf['edge_role_name'], project_conf['edge_unique_index'], project_conf['service_base_name'])
+        params = "--service_account_name {} --role_name {} --unique_index {} --service_base_name {}".format(
+            project_conf['edge_service_account_name'], project_conf['edge_role_name'],
+            project_conf['edge_unique_index'], project_conf['service_base_name'])
 
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -159,22 +184,23 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-            GCPActions().remove_role(project_conf['edge_role_name'])
+            GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                              project_conf['service_base_name'])
+            GCPActions.remove_role(project_conf['edge_role_name'])
         except:
             print("Service account or role hasn't been created")
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        append_result("Failed to creating service account and role.", str(err))
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to creating service account and role.", str(err))
         sys.exit(1)
 
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR PRIVATE SUBNET]')
         print('[CREATE SERVICE ACCOUNT AND ROLE FOR NOTEBOOK NODE]')
-        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} --service_base_name {}".format(
-            project_conf['ps_service_account_name'], project_conf['ps_role_name'],
-            project_conf['ps_policy_path'], project_conf['ps_roles_path'], project_conf['ps_unique_index'], project_conf['service_base_name'])
+        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+                 "--service_base_name {}".format(
+                  project_conf['ps_service_account_name'], project_conf['ps_role_name'], project_conf['ps_policy_path'],
+                  project_conf['ps_roles_path'], project_conf['ps_unique_index'], project_conf['service_base_name'])
 
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -182,16 +208,17 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
         try:
-            GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-            GCPActions().remove_role(project_conf['ps_role_name'])
+            GCPActions.remove_service_account(project_conf['ps_service_account_name'],
+                                              project_conf['service_base_name'])
+            GCPActions.remove_role(project_conf['ps_role_name'])
         except:
             print("Service account or role hasn't been created")
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        append_result("Failed to creating service account and role.", str(err))
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to creating service account and role.", str(err))
         sys.exit(1)
 
     try:
@@ -273,13 +300,13 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        append_result("Failed to create firewall for Edge node.", str(err))
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        dlab.fab.append_result("Failed to create firewall for Edge node.", str(err))
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
@@ -295,8 +322,8 @@
             project_conf['ps_firewall_target']
         ]
         ingress_rule['sourceRanges'] = [project_conf['private_subnet_cidr'],
-                                        GCPMeta().get_subnet(project_conf['subnet_name'],
-                                                             project_conf['region'])['ipCidrRange']
+                                        GCPMeta.get_subnet(project_conf['subnet_name'],
+                                                           project_conf['region'])['ipCidrRange']
                                         ]
         rules = [
             {
@@ -314,8 +341,8 @@
             project_conf['ps_firewall_target']
         ]
         egress_rule['destinationRanges'] = [project_conf['private_subnet_cidr'],
-                                            GCPMeta().get_subnet(project_conf['subnet_name'],
-                                                                 project_conf['region'])['ipCidrRange']
+                                            GCPMeta.get_subnet(project_conf['subnet_name'],
+                                                               project_conf['region'])['ipCidrRange']
                                             ]
         rules = [
             {
@@ -351,17 +378,17 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create firewall for private subnet.", str(err))
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to create firewall for private subnet.", str(err))
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
@@ -372,7 +399,8 @@
             "endpoint_tag": project_conf['endpoint_tag'],
             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
             "sbn": project_conf['service_base_name']}
-        params = "--bucket_name {} --tags '{}'".format(project_conf['shared_bucket_name'], json.dumps(project_conf['shared_bucket_tags']))
+        params = "--bucket_name {} --tags '{}'".format(project_conf['shared_bucket_name'],
+                                                       json.dumps(project_conf['shared_bucket_tags']))
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
         except:
@@ -385,7 +413,8 @@
             os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
             "sbn": project_conf['service_base_name'],
             "project_tag": project_conf['project_tag']}
-        params = "--bucket_name {} --tags '{}'".format(project_conf['bucket_name'], json.dumps(project_conf['bucket_tags']))
+        params = "--bucket_name {} --tags '{}'".format(project_conf['bucket_name'],
+                                                       json.dumps(project_conf['bucket_tags']))
 
         try:
             local("~/scripts/{}.py {}".format('common_create_bucket', params))
@@ -393,43 +422,45 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create bucket.", str(err))
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Unable to create bucket.", str(err))
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
         logging.info('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
         print('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
-        GCPActions().set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'],
+                                    project_conf['service_base_name'])
+        GCPActions.set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'],
+                                    project_conf['service_base_name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set bucket permissions.", str(err))
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to set bucket permissions.", str(err))
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     try:
@@ -442,64 +473,67 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create static ip.", str(err))
+        dlab.fab.append_result("Failed to create static ip.", str(err))
         try:
-            GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
+            GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
         except:
             print("Static IP address hasn't been created.")
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        project_conf['initial_user'] = 'ubuntu'
+        project_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        project_conf['initial_user'] = 'ec2-user'
+        project_conf['sudo_group'] = 'wheel'
 
     try:
         project_conf['static_ip'] = \
-            GCPMeta().get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
+            GCPMeta.get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
         logging.info('[CREATE EDGE INSTANCE]')
         print('[CREATE EDGE INSTANCE]')
-        params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} --ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} --static_ip {} --network_tag {} --labels '{}' --service_base_name {}".\
-            format(project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
-                   project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'], initial_user,
-                   project_conf['edge_service_account_name'], project_conf['image_name'], 'edge', project_conf['static_ip'],
-                   project_conf['network_tag'], json.dumps(project_conf['instance_labels']), project_conf['service_base_name'])
+        params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} " \
+                 "--ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} " \
+                 "--static_ip {} --network_tag {} --labels '{}' --service_base_name {}".format(
+                  project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
+                  project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'],
+                  project_conf['initial_user'], project_conf['edge_service_account_name'], project_conf['image_name'],
+                  'edge', project_conf['static_ip'], project_conf['network_tag'],
+                  json.dumps(project_conf['instance_labels']), project_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create instance.", str(err))
-        GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
-        GCPActions().remove_bucket(project_conf['bucket_name'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
-        GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
-        GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
-        GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
-        GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['ps_role_name'])
-        GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
-        GCPActions().remove_role(project_conf['edge_role_name'])
-        GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to create instance.", str(err))
+        GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
+        GCPActions.remove_bucket(project_conf['bucket_name'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+        GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+        GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+        GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+        GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['ps_role_name'])
+        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+                                          project_conf['service_base_name'])
+        GCPActions.remove_role(project_conf['edge_role_name'])
+        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        sys.exit(1)
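
Each failure branch in the hunks above repeats the same rollback sequence (bucket, seven firewall rules, two service accounts with their roles, private subnet). A minimal sketch of how those branches could collapse into one helper, mirroring the clear_resources() approach that ssn_configure.py introduces later in this patch; the helper name is hypothetical and not part of the diff:

    def remove_project_resources(GCPActions, project_conf, static_ip=False):
        # Roll back everything project_prepare created, in reverse order.
        if static_ip:
            GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
        GCPActions.remove_bucket(project_conf['bucket_name'])
        for fw in ('fw_edge_ingress_public', 'fw_edge_ingress_internal',
                   'fw_edge_egress_public', 'fw_edge_egress_internal',
                   'fw_ps_ingress', 'fw_ps_egress_private', 'fw_ps_egress_public'):
            GCPActions.remove_firewall(project_conf[fw])
        GCPActions.remove_service_account(project_conf['ps_service_account_name'],
                                          project_conf['service_base_name'])
        GCPActions.remove_role(project_conf['ps_role_name'])
        GCPActions.remove_service_account(project_conf['edge_service_account_name'],
                                          project_conf['service_base_name'])
        GCPActions.remove_role(project_conf['edge_role_name'])
        GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
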
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
index 57af4ad..96c021d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
@@ -22,12 +22,17 @@
 # ******************************************************************************
 
 import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
 import requests
 
+
 def terminate_edge_node(endpoint_name, project_name, service_base_name, region, zone):
     print("Terminating Dataengine-service clusters")
     try:
@@ -35,110 +40,124 @@
             {'sbn': service_base_name},
             {'project_tag': project_name}
         ]
-        clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+        clusters_list = GCPMeta.get_dataproc_list(labels)
         if clusters_list:
             for cluster_name in clusters_list:
-                actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+                GCPActions.delete_dataproc_cluster(cluster_name, region)
                 print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
         else:
             print("There are no Dataproc clusters to terminate.")
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate dataengine-service", str(err))
         sys.exit(1)
 
     print("Terminating EDGE and notebook instances")
     base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
-    keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+    keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
     targets = ['{}-{}'.format(base, k) for k in keys]
     try:
-        instances = GCPMeta().get_list_instances(zone, base)
+        instances = GCPMeta.get_list_instances(zone, base)
         if 'items' in instances:
             for i in instances['items']:
                 if 'project_tag' in i['labels'] and project_name == i['labels']['project_tag']:
-                    GCPActions().remove_instance(i['name'], zone)
+                    GCPActions.remove_instance(i['name'], zone)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to terminate instances", str(err))
         sys.exit(1)
 
     print("Removing static addresses")
     try:
-        static_addresses = GCPMeta().get_list_static_addresses(region, base)
+        static_addresses = GCPMeta.get_list_static_addresses(region, base)
         if 'items' in static_addresses:
             for i in static_addresses['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_static_address(i['name'], region)
+                    GCPActions.remove_static_address(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove static addresses", str(err))
         sys.exit(1)
 
     print("Removing storage bucket")
     try:
-        buckets = GCPMeta().get_list_buckets(base)
+        buckets = GCPMeta.get_list_buckets(base)
         if 'items' in buckets:
             for i in buckets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_bucket(i['name'])
+                    GCPActions.remove_bucket(i['name'])
+    except Exception as err:
+        dlab.fab.append_result("Failed to remove storage buckets", str(err))
+        sys.exit(1)
+
+    print("Removing project specific images")
+    try:
+        project_image_name_beginning = '{}-{}'.format(service_base_name, project_name)
+        images = GCPMeta.get_list_images(project_image_name_beginning)
+        if 'items' in images:
+            for i in images['items']:
+                GCPActions.remove_image(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove project images", str(err))
         sys.exit(1)
 
     print("Removing firewalls")
     try:
-        firewalls = GCPMeta().get_list_firewalls(base)
+        firewalls = GCPMeta.get_list_firewalls(base)
         if 'items' in firewalls:
             for i in firewalls['items']:
                 if bool(set(targets) & set(i['targetTags'])):
-                    GCPActions().remove_firewall(i['name'])
+                    GCPActions.remove_firewall(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove security groups", str(err))
         sys.exit(1)
 
     print("Removing Service accounts and roles")
     try:
-        list_service_accounts = GCPMeta().get_list_service_accounts()
-        role_targets = ['{}-{}-{}'.format(base, meta_lib.GCPMeta().get_index_by_service_account_name('{}-{}'.format(base, k)), k) for k in keys]
-        for service_account in (set(targets) & set(list_service_accounts)):
-            if service_account.startswith(service_base_name) and service_account.endswith('-edge'):
-                GCPActions().remove_service_account(service_account, service_base_name)
-            elif service_account.startswith(service_base_name) and service_account.endswith('-ps'):
-                GCPActions().remove_service_account(service_account, service_base_name)
-        list_roles_names = GCPMeta().get_list_roles()
+        list_service_accounts = GCPMeta.get_list_service_accounts()
+        sa_keys = ['edge-sa', 'ps-sa']
+        role_keys = ['edge-role', 'ps-role']
+        sa_target = ['{}-{}'.format(base, k) for k in sa_keys]
+        indexes = [GCPMeta.get_index_by_service_account_name('{}-{}'.format(base, k)) for k in sa_keys]
+        role_targets = ['{}-{}-{}'.format(base, i, k) for k in role_keys for i in indexes]
+        for service_account in (set(sa_target) & set(list_service_accounts)):
+            GCPActions.remove_service_account(service_account, service_base_name)
+        list_roles_names = GCPMeta.get_list_roles()
         for role in (set(role_targets) & set(list_roles_names)):
-            if role.startswith(service_base_name):
-                GCPActions().remove_role(role)
+            GCPActions.remove_role(role)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
         sys.exit(1)
 
     print("Removing subnets")
     try:
-        list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+        list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
         if 'items' in list_subnets:
             vpc_selflink = list_subnets['items'][0]['network']
             vpc_name = vpc_selflink.split('/')[-1]
-            subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+            subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
             for i in subnets['items']:
                 if bool(set(targets) & set([i['name']])):
-                    GCPActions().remove_subnet(i['name'], region)
+                    GCPActions.remove_subnet(i['name'], region)
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove subnets", str(err))
         sys.exit(1)
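
The service-account and role cleanup above removes only names that actually exist, by intersecting candidate names with the live inventory. Note that role_targets pairs every index with every role key, which over-generates candidates, but the intersection with get_list_roles() makes the extras harmless. A self-contained sketch of the pattern with made-up inventory values (illustrative only, not from the patch):

    base = 'dlab-myproj-myendp'                                  # hypothetical base name
    sa_keys = ['edge-sa', 'ps-sa']
    existing = ['dlab-myproj-myendp-edge-sa', 'unrelated-sa']    # pretend live inventory
    sa_targets = {'{}-{}'.format(base, k) for k in sa_keys}
    for sa in sa_targets & set(existing):
        print('would remove service account: {}'.format(sa))
    # -> would remove service account: dlab-myproj-myendp-edge-sa
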
 
 
 if __name__ == "__main__":
-    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+                                               os.environ['request_id'])
     local_log_filepath = "/logs/project/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
 
     # generating variables dictionary
+    GCPMeta = dlab.meta_lib.GCPMeta()
+    GCPActions = dlab.actions_lib.GCPActions()
     print('Generating infrastructure names and tags')
     project_conf = dict()
-    project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
+    project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+    project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+    project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+    project_conf['project_tag'] = project_conf['project_name']
     project_conf['region'] = os.environ['gcp_region']
     project_conf['zone'] = os.environ['gcp_zone']
 
@@ -146,11 +165,12 @@
         logging.info('[TERMINATE EDGE]')
         print('[TERMINATE EDGE]')
         try:
-            terminate_edge_node(project_conf['endpoint_name'], project_conf['project_name'], project_conf['service_base_name'],
+            terminate_edge_node(project_conf['endpoint_name'], project_conf['project_name'],
+                                project_conf['service_base_name'],
                                 project_conf['region'], project_conf['zone'])
         except Exception as err:
             traceback.print_exc()
-            append_result("Failed to terminate edge.", str(err))
+            dlab.fab.append_result("Failed to terminate edge.", str(err))
     except Exception as err:
         print('Error: {0}'.format(err))
         sys.exit(1)
@@ -158,8 +178,10 @@
     try:
         print('[KEYCLOAK PROJECT CLIENT DELETE]')
         logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
-        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
-        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                    os.environ['keycloak_realm_name'])
 
         keycloak_auth_data = {
             "username": os.environ['keycloak_user'],
@@ -169,7 +191,8 @@
         }
 
         client_params = {
-            "clientId": project_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'],
+            "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+                                          project_conf['endpoint_name'])
         }
 
         keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -192,11 +215,11 @@
 
     try:
         with open("/root/result.json", 'w') as result:
-            res = {"service_base_name": edge_conf['service_base_name'],
-                   "project_name": edge_conf['project_name'],
+            res = {"service_base_name": project_conf['service_base_name'],
+                   "project_name": project_conf['project_name'],
                    "Action": "Terminate project"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
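
The Keycloak hunk above reduces to the standard admin REST flow: obtain a master-realm token, look the client up by clientId, then delete it by its internal id. A minimal sketch under those assumptions, using the endpoint shapes shown in the diff (error handling omitted):

    import requests

    def delete_keycloak_client(auth_url, client_url, auth_data, client_id):
        # Token endpoint: {server}/realms/master/protocol/openid-connect/token
        token = requests.post(auth_url, data=auth_data).json()['access_token']
        headers = {'Authorization': 'Bearer {}'.format(token)}
        # GET /admin/realms/{realm}/clients?clientId=... returns a list of matches.
        clients = requests.get(client_url, headers=headers,
                               params={'clientId': client_id}).json()
        if clients:
            # DELETE /admin/realms/{realm}/clients/{id}
            requests.delete('{}/{}'.format(client_url, clients[0]['id']), headers=headers)
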
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
index d5e5b08..a51df46 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
@@ -24,10 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,68 +40,78 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['ip_address'] = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    notebook_config['rstudio_pass'] = id_generator()
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['ip_address'] = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -107,9 +119,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -127,9 +138,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -145,9 +155,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring RStudio and all dependencies
@@ -157,7 +166,7 @@
         params = "--hostname {0}  --keyfile {1} " \
                  "--region {2} --rstudio_pass {3} " \
                  "--rstudio_version {4} --os_user {5} " \
-                 "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+                 "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
             .format(instance_hostname, notebook_config['ssh_key_path'],
                     os.environ['gcp_region'], notebook_config['rstudio_pass'],
                     os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -169,9 +178,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure RStudio.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure RStudio.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -180,16 +188,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -201,35 +209,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
         
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -254,54 +261,58 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "RStudio",
-                    "url": rstudio_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_access_url}#,
-                   #{"description": "RStudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "RStudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "RStudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
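
The [CREATING IMAGE] hunk in this file is idempotent: images are created only when get_image_by_name() returns an empty string, and a partially empty ID list is read as a concurrent creation already in flight. A condensed, illustrative restatement of that control flow, assuming the GCPMeta/GCPActions facades instantiated by this patch:

    def create_notebook_images(GCPMeta, GCPActions, conf):
        # Skip creation entirely if a primary image already exists.
        if GCPMeta.get_image_by_name(conf['expected_primary_image_name']) != '':
            return
        ids = GCPActions.create_image_from_instance_disks(
            conf['expected_primary_image_name'], conf['expected_secondary_image_name'],
            conf['instance_name'], conf['zone'], conf['image_labels'])
        if ids and ids[0] != '':
            print('Primary image created: {}'.format(ids[0]))
        else:
            print('Another image creation for this template is already in progress.')
        if ids and ids[1] != '':
            print('Secondary image created: {}'.format(ids[1]))
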
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
index 54b9781..ce37274 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
@@ -21,15 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
 import sys, os
 from fabric.api import *
-from dlab.ssn_lib import *
 import traceback
 import json
 import argparse
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--ssn_unique_index', type=str, default='')
@@ -41,20 +42,36 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
+
+    def clear_resources():
+        GCPActions.remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
 
     try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
         logging.info('[DERIVING NAMES]')
         print('[DERIVING NAMES]')
-        pre_defined_vpc = False
-        pre_defined_subnet = False
-        pre_defined_firewall = False
-        billing_enabled = True
-
         ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_firewall'] = False
+        ssn_conf['billing_enabled'] = True
+
         ssn_conf['ssn_unique_index'] = args.ssn_unique_index
-        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-            os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+            os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
         ssn_conf['region'] = os.environ['gcp_region']
         ssn_conf['zone'] = os.environ['gcp_zone']
         ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
@@ -64,19 +81,19 @@
             if os.environ['gcp_vpc_name'] == '':
                 raise KeyError
             else:
-                pre_defined_vpc = True
+                ssn_conf['pre_defined_vpc'] = True
                 ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
         except KeyError:
-            ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+            ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
 
         try:
             if os.environ['gcp_subnet_name'] == '':
                 raise KeyError
             else:
-                pre_defined_subnet = True
+                ssn_conf['pre_defined_subnet'] = True
                 ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
         except KeyError:
-            ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
+            ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
         try:
             if os.environ['gcp_firewall_name'] == '':
                 raise KeyError
@@ -84,13 +101,11 @@
-                pre_defined_firewall = True
+                ssn_conf['pre_defined_firewall'] = True
                 ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
         except KeyError:
-            ssn_conf['firewall_name'] = '{}-ssn-firewall'.format(ssn_conf['service_base_name'])
-        ssn_conf['subnet_cidr'] = '10.10.1.0/24'
+            ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
         ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
         ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
         ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
         ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-        ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-' + ssn_conf['ssn_unique_index'] + '-ssn-role'
 
         try:
             if os.environ['aws_account_id'] == '':
@@ -98,44 +113,36 @@
             if os.environ['aws_billing_bucket'] == '':
                 raise KeyError
         except KeyError:
-            billing_enabled = False
-        if not billing_enabled:
+            ssn_conf['billing_enabled'] = False
+        if not ssn_conf['billing_enabled']:
             os.environ['aws_account_id'] = 'None'
             os.environ['aws_billing_bucket'] = 'None'
             os.environ['aws_report_path'] = 'None'
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed deriving names.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed deriving names.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
-        instance_hostname = GCPMeta().get_instance_public_ip_by_name(ssn_conf['instance_name'])
+        ssn_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(ssn_conf['instance_name'])
         if os.environ['conf_stepcerts_enabled'] == 'true':
-            step_cert_sans = ' --san {0} --san {1}'.format(GCPMeta().get_instance_public_ip_by_name(
-                ssn_conf['instance_name']), get_instance_private_ip_address('ssn', ssn_conf['instance_name']))
+            ssn_conf['step_cert_sans'] = ' --san {0} --san {1}'.format(GCPMeta.get_instance_public_ip_by_name(
+                ssn_conf['instance_name']), dlab.meta_lib.get_instance_private_ip_address('ssn',
+                                                                                          ssn_conf['instance_name']))
         else:
-            step_cert_sans = ''
+            ssn_conf['step_cert_sans'] = ''
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            ssn_conf['initial_user'] = 'ubuntu'
+            ssn_conf['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            ssn_conf['initial_user'] = 'ec2-user'
+            ssn_conf['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'],
+            ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -143,18 +150,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab-user'.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -163,7 +160,7 @@
         params = "--hostname {} --keyfile {} --pip_packages " \
                  "'boto3 backoff argparse fabric==1.14.0 awscli pymongo pyyaml " \
                  "google-api-python-client google-cloud-storage pycrypto' --user {} --region {}". \
-            format(instance_hostname, ssn_conf['ssh_key_path'],
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'],
                    ssn_conf['dlab_ssh_user'], ssn_conf['region'])
 
         try:
@@ -172,18 +169,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing software: pip, packages.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -195,9 +182,9 @@
                              "subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
                  "--tag_resource_id {} --step_cert_sans '{}'". \
-            format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
                    ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
-                   step_cert_sans)
+                   ssn_conf['step_cert_sans'])
 
         try:
             local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -205,18 +192,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed configuring ssn.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Failed configuring ssn.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -235,8 +212,9 @@
                              {"name": "deeplearning", "tag": "latest"},
                              {"name": "dataengine", "tag": "latest"},
                              {"name": "dataengine-service", "tag": "latest"}]
-        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} --cloud_provider {} --region {}". \
-            format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
+        params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+                 "--cloud_provider {} --region {}". \
+            format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
                    os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
                    os.environ['conf_cloud_provider'], ssn_conf['region'])
 
@@ -246,18 +224,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to configure docker.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Unable to configure docker.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -267,7 +235,7 @@
         cloud_params = [
             {
                 'key': 'KEYCLOAK_REDIRECT_URI',
-                'value': "https://{0}/".format(instance_hostname)
+                'value': "https://{0}/".format(ssn_conf['instance_hostname'])
             },
             {
                 'key': 'KEYCLOAK_REALM_NAME',
@@ -470,18 +438,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to configure UI.", str(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.dlab.fab.append_result("Unable to configure UI.", str(err))
+        clear_resources()
         sys.exit(1)
 
     try:
@@ -489,7 +447,7 @@
         print('[SUMMARY]')
         print("Service base name: {}".format(ssn_conf['service_base_name']))
         print("SSN Name: {}".format(ssn_conf['instance_name']))
-        print("SSN Hostname: {}".format(instance_hostname))
+        print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
         print("Role name: {}".format(ssn_conf['role_name']))
         print("Key name: {}".format(os.environ['conf_key_name']))
         print("VPC Name: {}".format(ssn_conf['vpc_name']))
@@ -498,12 +456,12 @@
         print("SSN instance size: {}".format(ssn_conf['instance_size']))
         print("SSN AMI name: {}".format(ssn_conf['image_name']))
         print("Region: {}".format(ssn_conf['region']))
-        jenkins_url = "http://{}/jenkins".format(instance_hostname)
-        jenkins_url_https = "https://{}/jenkins".format(instance_hostname)
+        jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_hostname'])
+        jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_hostname'])
         print("Jenkins URL: {}".format(jenkins_url))
         print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
-        print("DLab UI HTTP URL: http://{}".format(instance_hostname))
-        print("DLab UI HTTPS URL: https://{}".format(instance_hostname))
+        print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_hostname']))
+        print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_hostname']))
         try:
             with open('jenkins_creds.txt') as f:
                 print(f.read())
@@ -513,10 +471,8 @@
         with open("/root/result.json", 'w') as f:
             res = {"service_base_name": ssn_conf['service_base_name'],
                    "instance_name": ssn_conf['instance_name'],
-                   "instance_hostname": instance_hostname,
+                   "instance_hostname": ssn_conf['instance_hostname'],
                    "role_name": ssn_conf['role_name'],
-                   #"role_profile_name": role_profile_name,
-                   #"policy_name": policy_name,
                    "master_keyname": os.environ['conf_key_name'],
                    "vpc_id": ssn_conf['vpc_name'],
                    "subnet_id": ssn_conf['subnet_name'],
@@ -528,18 +484,10 @@
 
         print('Upload response file')
         params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
-            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], instance_hostname)
+            format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+                   ssn_conf['instance_hostname'])
         local("~/scripts/{}.py {}".format('upload_response_file', params))
     except Exception as err:
-        print('Error: {0}'.format(err))
-        GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Error with writing results.", str(err))
+        clear_resources()
         sys.exit(1)
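
The except blocks above now funnel cleanup through a single clear_resources() call whose definition sits outside these hunks. A minimal sketch of what the helper presumably consolidates, assuming it reuses the script-level ssn_conf dict and GCPActions instance and mirrors the inline teardown the diff removes:

    def clear_resources():
        # Assumed body: the same teardown sequence the removed except blocks
        # inlined; ssn_conf and GCPActions are the script-level names above.
        GCPActions.remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
        GCPActions.remove_service_account(ssn_conf['service_account_name'],
                                          ssn_conf['service_base_name'])
        GCPActions.remove_role(ssn_conf['role_name'])
        if not ssn_conf['pre_defined_firewall']:
            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
        if not ssn_conf['pre_defined_subnet']:
            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
        if not ssn_conf['pre_defined_vpc']:
            GCPActions.remove_vpc(ssn_conf['vpc_name'])

Factoring the teardown out keeps every failure path identical and makes it harder for a new except branch to forget one resource.
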
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
index cf4b045..8cf209d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
@@ -21,14 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
 import sys, os
 from fabric.api import *
-from dlab.ssn_lib import *
 import json
 import argparse
+import logging
+import traceback
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--ssn_unique_index', type=str, default='')
@@ -36,52 +38,58 @@
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
-    local_log_filepath = "/logs/" + os.environ['conf_resource'] +  "/" + local_log_filename
+    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-    instance = 'ssn'
-    pre_defined_vpc = False
-    pre_defined_subnet = False
-    pre_defined_firewall = False
-    logging.info('[DERIVING NAMES]')
-    print('[DERIVING NAMES]')
-    ssn_conf = dict()
-    ssn_conf['ssn_unique_index'] = args.ssn_unique_index
-    ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
-    ssn_conf['region'] = os.environ['gcp_region']
-    ssn_conf['zone'] = os.environ['gcp_zone']
-    ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
-    ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
-    ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
-    ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
-    ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
-    ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
-    ssn_conf['subnet_prefix'] = '20'
-    ssn_conf['firewall_name'] = '{}-ssn-firewall'.format(ssn_conf['service_base_name'])
-    ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
-    ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
-    ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-' + ssn_conf['ssn_unique_index'] + '-ssn-role'
-    ssn_conf['static_address_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
-    ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
-    ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
-    ssn_conf['network_tag'] = ssn_conf['instance_name']
-    ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
-                                   "sbn": ssn_conf['service_base_name'],
-                                   os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
-    ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+    try:
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        ssn_conf = dict()
+        ssn_conf['instance'] = 'ssn'
+        ssn_conf['pre_defined_vpc'] = False
+        ssn_conf['pre_defined_subnet'] = False
+        ssn_conf['pre_defined_firewall'] = False
+        logging.info('[DERIVING NAMES]')
+        print('[DERIVING NAMES]')
+        ssn_conf['ssn_unique_index'] = args.ssn_unique_index
+        ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+                os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
+        ssn_conf['region'] = os.environ['gcp_region']
+        ssn_conf['zone'] = os.environ['gcp_zone']
+        ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+        ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+        ssn_conf['subnet_prefix'] = '20'
+        ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+        ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
+        ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+        ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
+        ssn_conf['static_address_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+        ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
+        ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
+        ssn_conf['network_tag'] = ssn_conf['instance_name']
+        ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
+                                       "sbn": ssn_conf['service_base_name'],
+                                       os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+        ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+    except Exception as err:
+        dlab.fab.dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+        sys.exit(1)
 
-    if GCPMeta().get_instance(ssn_conf['instance_name']):
-        print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+    if GCPMeta.get_instance(ssn_conf['instance_name']):
+        dlab.fab.dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. "
+                                        "Please try again.")
         sys.exit(1)
 
     try:
         if os.environ['gcp_vpc_name'] == '':
             raise KeyError
         else:
-            pre_defined_vpc = True
+            ssn_conf['pre_defined_vpc'] = True
             ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
     except KeyError:
         try:
@@ -95,21 +103,20 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create VPC. Exception:" + str(err))
-            if not pre_defined_vpc:
+            dlab.fab.append_result("Failed to create VPC.", str(err))
+            if not ssn_conf['pre_defined_vpc']:
                 try:
-                    GCPActions().remove_vpc(ssn_conf['vpc_name'])
+                    GCPActions.remove_vpc(ssn_conf['vpc_name'])
                 except:
                     print("VPC hasn't been created.")
             sys.exit(1)
 
     try:
-        ssn_conf['vpc_selflink'] = GCPMeta().get_vpc(ssn_conf['vpc_name'])['selfLink']
+        ssn_conf['vpc_selflink'] = GCPMeta.get_vpc(ssn_conf['vpc_name'])['selfLink']
         if os.environ['gcp_subnet_name'] == '':
             raise KeyError
         else:
-            pre_defined_subnet = True
+            ssn_conf['pre_defined_subnet'] = True
             ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
     except KeyError:
         try:
@@ -125,15 +132,14 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Subnet.", str(err))
-            if not pre_defined_subnet:
+            dlab.fab.append_result("Failed to create Subnet.", str(err))
+            if not ssn_conf['pre_defined_subnet']:
                 try:
-                    GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+                    GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
                 except:
                     print("Subnet hasn't been created.")
-            if not pre_defined_vpc:
-                GCPActions().remove_vpc(ssn_conf['vpc_name'])
+            if not ssn_conf['pre_defined_vpc']:
+                GCPActions.remove_vpc(ssn_conf['vpc_name'])
             sys.exit(1)
 
 
@@ -141,7 +147,7 @@
         if os.environ['gcp_firewall_name'] == '':
             raise KeyError
         else:
-            pre_defined_firewall = True
+            ssn_conf['pre_defined_firewall'] = True
             ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
     except KeyError:
         try:
@@ -152,7 +158,7 @@
             firewall_rules['egress'] = []
 
             ingress_rule = dict()
-            ingress_rule['name'] = ssn_conf['firewall_name'] + '-ingress'
+            ingress_rule['name'] = '{}-ingress'.format(ssn_conf['firewall_name'])
             ingress_rule['targetTags'] = [ssn_conf['network_tag']]
             ingress_rule['sourceRanges'] = [ssn_conf['allowed_ip_cidr']]
             rules = [
@@ -167,7 +173,7 @@
             firewall_rules['ingress'].append(ingress_rule)
 
             egress_rule = dict()
-            egress_rule['name'] = ssn_conf['firewall_name'] + '-egress'
+            egress_rule['name'] = '{}-egress'.format(ssn_conf['firewall_name'])
             egress_rule['targetTags'] = [ssn_conf['network_tag']]
             egress_rule['destinationRanges'] = [ssn_conf['allowed_ip_cidr']]
             rules = [
@@ -188,40 +194,39 @@
                 traceback.print_exc()
                 raise Exception
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed to create Firewall.", str(err))
-            if not pre_defined_subnet:
-                GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-            if not pre_defined_vpc:
-                GCPActions().remove_vpc(ssn_conf['vpc_name'])
+            dlab.fab.append_result("Failed to create Firewall.", str(err))
+            if not ssn_conf['pre_defined_subnet']:
+                GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+            if not ssn_conf['pre_defined_vpc']:
+                GCPActions.remove_vpc(ssn_conf['vpc_name'])
             sys.exit(1)
 
     try:
         logging.info('[CREATE SERVICE ACCOUNT AND ROLE]')
         print('[CREATE SERVICE ACCOUNT AND ROLE]')
-        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} --service_base_name {}".format(
-            ssn_conf['service_account_name'], ssn_conf['role_name'],
-            ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'], ssn_conf['ssn_unique_index'], ssn_conf['service_base_name'])
+        params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+                 "--service_base_name {}".format( ssn_conf['service_account_name'], ssn_conf['role_name'],
+                                                  ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'],
+                                                  ssn_conf['ssn_unique_index'], ssn_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_service_account', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create Service account and role.", str(err))
+        dlab.fab.append_result("Unable to create Service account and role.", str(err))
         try:
-            GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-            GCPActions().remove_role(ssn_conf['role_name'])
+            GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+            GCPActions.remove_role(ssn_conf['role_name'])
         except:
             print("Service account hasn't been created")
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
         sys.exit(1)
 
     try:
@@ -234,58 +239,62 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to create static ip.", str(err))
+        dlab.fab.append_result("Failed to create static ip.", str(err))
         try:
-            GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+            GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
         except:
             print("Static IP address hasn't been created.")
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        GCPActions.remove_bucket(ssn_conf['ssn_bucket_name'])
+        GCPActions.remove_bucket(ssn_conf['shared_bucket_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
         sys.exit(1)
 
     if os.environ['conf_os_family'] == 'debian':
-        initial_user = 'ubuntu'
-        sudo_group = 'sudo'
+        ssn_conf['initial_user'] = 'ubuntu'
+        ssn_conf['sudo_group'] = 'sudo'
     if os.environ['conf_os_family'] == 'redhat':
-        initial_user = 'ec2-user'
-        sudo_group = 'wheel'
+        ssn_conf['initial_user'] = 'ec2-user'
+        ssn_conf['sudo_group'] = 'wheel'
 
     try:
-        ssn_conf['static_ip'] = \
-            GCPMeta().get_static_address(ssn_conf['region'], ssn_conf['static_address_name'])['address']
+        ssn_conf['static_ip'] = GCPMeta.get_static_address(ssn_conf['region'],
+                                                           ssn_conf['static_address_name'])['address']
         logging.info('[CREATE SSN INSTANCE]')
         print('[CREATE SSN INSTANCE]')
         params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5}"\
                  " --ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9}"\
-                 " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' --primary_disk_size {14} --service_base_name {15}".\
+                 " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' " \
+                 "--primary_disk_size {14} --service_base_name {15}".\
             format(ssn_conf['instance_name'], ssn_conf['region'], ssn_conf['zone'], ssn_conf['vpc_name'],
-                   ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'], initial_user,
-                   ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn', ssn_conf['static_ip'],
-                   ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20', ssn_conf['service_base_name'])
+                   ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'],
+                   ssn_conf['initial_user'], ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn',
+                   ssn_conf['static_ip'], ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20',
+                   ssn_conf['service_base_name'])
         try:
             local("~/scripts/{}.py {}".format('common_create_instance', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Unable to create ssn instance.", str(err))
-        GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
-        GCPActions().remove_role(ssn_conf['role_name'])
-        GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
-        if not pre_defined_firewall:
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
-            GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
-        if not pre_defined_subnet:
-            GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
-        if not pre_defined_vpc:
-            GCPActions().remove_vpc(ssn_conf['vpc_name'])
-        sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Unable to create ssn instance.", str(err))
+        GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+        GCPActions.remove_role(ssn_conf['role_name'])
+        GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+        GCPActions.remove_bucket(ssn_conf['ssn_bucket_name'])
+        GCPActions.remove_bucket(ssn_conf['shared_bucket_name'])
+        if not ssn_conf['pre_defined_firewall']:
+            GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+            GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+        if not ssn_conf['pre_defined_subnet']:
+            GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+        if not ssn_conf['pre_defined_vpc']:
+            GCPActions.remove_vpc(ssn_conf['vpc_name'])
+        sys.exit(1)
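
Both the prepare and terminate scripts now derive service_base_name identically, truncating to 20 characters before collapsing separators. The exact semantics of dlab.fab.replace_multi_symbols are assumed here (collapse runs of the given symbol); a standalone illustration:

    import re

    def replace_multi_symbols(text, symbol, collapse=True):
        # Assumed semantics: collapse runs of `symbol` into one occurrence.
        return re.sub('{}+'.format(re.escape(symbol)), symbol, text) if collapse else text

    # Mirrors the derivation above: '_' -> '-', lowercase, cut to 20, collapse dashes.
    raw = 'My_DLab__Env_Name_2020'  # hypothetical conf_service_base_name
    print(replace_multi_symbols(raw.replace('_', '-').lower()[:20], '-', True))
    # -> 'my-dlab-env-name-20'

Note that truncation happens before collapsing, so the final name can end up shorter than 20 characters.
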
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
index c033429..3e20a15 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
@@ -21,11 +21,16 @@
 #
 # ******************************************************************************
 
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
+import logging
+import json
+import traceback
 from fabric.api import *
-from dlab.ssn_lib import *
 
 if __name__ == "__main__":
     local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -36,8 +41,8 @@
     # generating variables dictionary
     print('Generating infrastructure names and tags')
     ssn_conf = dict()
-    ssn_conf['service_base_name'] = replace_multi_symbols(
-        os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
+    ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(
+        os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
     ssn_conf['region'] = os.environ['gcp_region']
     ssn_conf['zone'] = os.environ['gcp_zone']
     pre_defined_vpc = False
@@ -48,7 +53,7 @@
             pre_defined_vpc = True
             ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
     except KeyError:
-        ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+        ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
 
     try:
         logging.info('[TERMINATE SSN]')
@@ -61,8 +66,7 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to terminate ssn.", str(err))
+        dlab.fab.append_result("Failed to terminate ssn.", str(err))
         sys.exit(1)
 
     try:
@@ -71,6 +75,6 @@
                    "Action": "Terminate ssn with all service_base_name environment"}
             print(json.dumps(res))
             result.write(json.dumps(res))
-    except:
-        print("Failed writing results.")
-        sys.exit(0)
\ No newline at end of file
+    except Exception as err:
+        dlab.fab.append_result("Error with writing results", str(err))
+        sys.exit(1)
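
Every failure path in these scripts now goes through dlab.fab.append_result(error, error_message='') instead of a bare print() followed by append_result(). The helper's real definition lives in dlab/fab.py and is not part of this diff; a hedged sketch of its assumed behavior (the output path and payload shape are assumptions):

    import json

    def append_result(error, error_message=''):
        # Assumed behavior: echo the error and persist it where the
        # provisioning engine collects results.
        print('Error: {}'.format(error))
        if error_message:
            print(error_message)
        with open('/root/result.json', 'w') as f:
            f.write(json.dumps({"error": '{} {}'.format(error, error_message).strip()}))

Call sites then reduce to one line, e.g. dlab.fab.append_result("Failed to terminate ssn.", str(err)).
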
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
index 6cfd891..021eb4c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
@@ -25,10 +25,13 @@
 import json
 import sys
 import requests
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
+import uuid
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -38,69 +41,76 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                       "project_tag": notebook_config['project_tag'],
-                                       "endpoint_tag": notebook_config['endpoint_tag'],
-                                       "product": "dlab"}
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = os.environ['edge_user_name']
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
+
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -108,9 +118,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -127,16 +136,17 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
         print('[CONFIGURE KEYCLOAK]')
         logging.info('[CONFIGURE KEYCLOAK]')
-        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
-        keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+        keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+            os.environ['keycloak_auth_server_url'])
+        keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+                                                                           os.environ['keycloak_realm_name'])
         keycloak_auth_data = {
             "username": os.environ['keycloak_user'],
             "password": os.environ['keycloak_user_password'],
@@ -150,14 +160,21 @@
                 "clientId": keycloak_client_id,
             }
             keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
-            keycloak_get_id_client = requests.get(keycloak_client_create_url, data=keycloak_auth_data, params=client_params, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+            keycloak_get_id_client = requests.get(
+                keycloak_client_create_url, data=keycloak_auth_data, params=client_params,
+                headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                         "Content-Type": "application/json"})
             json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
             # Check, if response is not empty
             if len(json_keycloak_client_id) != 0:
                 print('Keycloak client {} exists. Getting his required attributes.'.format(keycloak_client_id))
                 keycloak_id_client = json_keycloak_client_id[0]['id']
-                keycloak_client_get_secret_url = ("{0}/{1}/client-secret".format(keycloak_client_create_url, keycloak_id_client))
-                keycloak_client_get_secret = requests.get(keycloak_client_get_secret_url, data=keycloak_auth_data, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+                keycloak_client_get_secret_url = ("{0}/{1}/client-secret".format(keycloak_client_create_url,
+                                                                                 keycloak_id_client))
+                keycloak_client_get_secret = requests.get(
+                    keycloak_client_get_secret_url, data=keycloak_auth_data,
+                    headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")), "Content-Type":
+                        "application/json"})
                 json_keycloak_client_secret = json.loads(keycloak_client_get_secret.text)
                 keycloak_client_secret = json_keycloak_client_secret['value']
             else:
@@ -169,14 +186,16 @@
                     "redirectUris": ["*"],
                     "secret": keycloak_client_secret,
                 }
-                keycloak_client = requests.post(keycloak_client_create_url, json=keycloak_client_data, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+                keycloak_client = requests.post(
+                    keycloak_client_create_url, json=keycloak_client_data,
+                    headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+                             "Content-Type": "application/json"})
         except Exception as err:
-            append_result("Failed to configure keycloak.")
+            dlab.fab.append_result("Failed to configure keycloak.")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure keycloak.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure keycloak.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing and configuring superset
@@ -201,9 +220,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure superset.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure superset.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -212,16 +230,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -233,35 +251,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creating operation for your template have been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -286,12 +303,11 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -309,9 +325,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy for docker.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -326,47 +341,51 @@
         try:
            local("~/scripts/superset_start.py {}".format(params))
         except:
-             traceback.print_exc()
-             raise Exception
+            traceback.print_exc()
+            raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to start Superset.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to start Superset.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    superset_ip_url = "http://" + ip_address + ":8088/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    superset_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    superset_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("SUPERSET URL: {}".format(superset_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print("ReverseProxyNotebook".format(superset_notebook_acces_url))
-    print("ReverseProxyUngit".format(superset_ungit_acces_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        superset_ip_url = "http://" + ip_address + ":8088/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        superset_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
+        superset_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("SUPERSET URL: {}".format(superset_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook".format(superset_notebook_acces_url))
+        print("ReverseProxyUngit".format(superset_ungit_acces_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
+                                                                                           notebook_config['dlab_ssh_user'],
+                                                                                           ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Superset",
-                    "url": superset_notebook_acces_url},
-                   {"description": "Ungit",
-                    "url": superset_ungit_acces_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Superset",
+                        "url": superset_notebook_acces_url},
+                       {"description": "Ungit",
+                        "url": superset_ungit_acces_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
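
The Keycloak hunk above implements a check-or-create flow against the admin REST API: fetch an admin token, look the client up by clientId, and either read back its secret or register it. A condensed sketch under the same environment variables; the grant_type/client_id fields are assumptions, since the diff truncates keycloak_auth_data:

    import os
    import requests

    def get_or_create_client_secret(client_id, client_secret):
        base = os.environ['keycloak_auth_server_url']
        token_url = '{}/realms/master/protocol/openid-connect/token'.format(base)
        clients_url = '{}/admin/realms/{}/clients'.format(base, os.environ['keycloak_realm_name'])
        token = requests.post(token_url, data={
            "username": os.environ['keycloak_user'],
            "password": os.environ['keycloak_user_password'],
            "grant_type": "password",        # assumed: password grant via admin-cli
            "client_id": "admin-cli"}).json()
        headers = {"Authorization": "Bearer {}".format(token.get("access_token")),
                   "Content-Type": "application/json"}
        existing = requests.get(clients_url, params={"clientId": client_id}, headers=headers).json()
        if existing:
            # Client already registered: read back its secret.
            secret_url = '{}/{}/client-secret'.format(clients_url, existing[0]['id'])
            return requests.get(secret_url, headers=headers).json()['value']
        # Otherwise register the client with the supplied secret.
        requests.post(clients_url, headers=headers,
                      json={"clientId": client_id, "enabled": "true",
                            "redirectUris": ["*"], "secret": client_secret})
        return client_secret
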
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
index f791e74..282e0d0 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
@@ -24,12 +24,13 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import argparse
 import traceback
+from fabric.api import *
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--uuid', type=str, default='')
@@ -43,68 +44,77 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['rstudio_pass'] = id_generator()
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+            notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
 
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -131,9 +140,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -149,9 +157,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -172,9 +179,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure tensoflow-rstudio.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure tensoflow-rstudio.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -183,16 +189,16 @@
         additional_config = {"user_keyname": os.environ['project_name'],
                              "user_keydir": os.environ['conf_key_dir']}
         params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
-            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+            instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+            notebook_config['dlab_ssh_user'])
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -204,21 +210,20 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
@@ -229,11 +234,10 @@
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -258,62 +262,67 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    rstudio_ip_url = "http://" + ip_address + ":8787/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
-    rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Rstudio URL: {}".format(rstudio_ip_url))
-    print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
-    print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        rstudio_ip_url = "http://" + ip_address + ":8787/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Rstudio URL: {}".format(rstudio_ip_url))
+        print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+        print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Rstudio",
-                    "url": rstudio_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": rstudio_ungit_access_url}#,
-                   #{"description": "Rstudio (via tunnel)",
-                   # "url": rstudio_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ],
-               "exploratory_user": notebook_config['dlab_ssh_user'],
-               "exploratory_pass": notebook_config['rstudio_pass']}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Rstudio",
+                        "url": rstudio_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": rstudio_ungit_access_url}#,
+                       #{"description": "Rstudio (via tunnel)",
+                       # "url": rstudio_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ],
+                   "exploratory_user": notebook_config['dlab_ssh_user'],
+                   "exploratory_pass": notebook_config['rstudio_pass']}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
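The import change repeated across these files (from dlab.fab import * becoming import dlab.fab) trades wildcard convenience for explicit origins: with three star-imports active, names such as append_result, GCPMeta and id_generator could come from any of them, and a later import can silently shadow an earlier one. A small illustration of the same idea using only the standard library, with os.path standing in for the dlab modules:

import os.path

def key_path(key_dir, key_name):
    # With "from os.path import *" a locally defined join() would silently
    # shadow os.path.join; the qualified call keeps the name's origin explicit.
    return os.path.join(key_dir, '{}.pem'.format(key_name))

print(key_path('/root/keys', 'mykey'))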
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
index 950a1c0..a50afd1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,77 +40,84 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
-
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -126,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -144,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring TensorFlow and all dependencies
@@ -163,9 +169,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure TensorFlow.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -179,12 +184,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing users key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -196,21 +200,20 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed setup git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to setup git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
@@ -221,11 +224,10 @@
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -250,60 +252,63 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        tensorboard_url = "http://" + ip_address + ":6006/"
+        jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+            notebook_config['exploratory_name'])
+        jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("TensorBoard URL: {}".format(tensorboard_url))
+        print("TensorBoard log dir: /var/log/tensorboard")
+        print("Jupyter URL: {}".format(jupyter_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    tensorboard_url = "http://" + ip_address + ":6006/"
-    jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
-    tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
-        notebook_config['exploratory_name'])
-    jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("TensorBoard URL: {}".format(tensorboard_url))
-    print("TensorBoard log dir: /var/log/tensorboard")
-    print("Jupyter URL: {}".format(jupyter_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "tensorboard_log_dir": "/var/log/tensorboard",
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Jupyter",
-                    "url": jupyter_notebook_access_url},
-                   {"description": "TensorBoard",
-                    "url": tensorboard_access_url},
-                   {"description": "Ungit",
-                    "url": jupyter_ungit_access_url}#,
-                   #{"description": "Jupyter (via tunnel)",
-                   # "url": jupyter_ip_url},
-                   #{"description": "TensorBoard (via tunnel)",
-                   # "url": tensorboard_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "tensorboard_log_dir": "/var/log/tensorboard",
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Jupyter",
+                        "url": jupyter_notebook_access_url},
+                       {"description": "TensorBoard",
+                        "url": tensorboard_access_url},
+                       {"description": "Ungit",
+                        "url": jupyter_ungit_access_url}#,
+                       #{"description": "Jupyter (via tunnel)",
+                       # "url": jupyter_ip_url},
+                       #{"description": "TensorBoard (via tunnel)",
+                       # "url": tensorboard_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
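The other recurring change is instantiating GCPMeta() and GCPActions() once per script and reusing the objects, instead of constructing a fresh one at every call site (GCPMeta().get_private_ip_address(...)). Since those constructors build Google API clients, per-call construction repeats the setup work. A self-contained sketch of the before/after, with MetaClient as a hypothetical stand-in:

class MetaClient(object):
    def __init__(self):
        # In the real GCPMeta this builds authenticated Google API clients,
        # which is the part worth doing only once.
        print('building API client')

    def get_private_ip_address(self, name):
        return '10.0.0.2'  # placeholder lookup

# Before: a fresh client per call site, e.g. GCPMeta().get_private_ip_address(...).
ip_old_style = MetaClient().get_private_ip_address('nb-test')

# After: one client built up front and reused for every lookup in the script.
meta = MetaClient()
instance_ip = meta.get_private_ip_address('nb-test')
edge_ip = meta.get_private_ip_address('edge-node')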
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
index 7981005..4c7839b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
@@ -24,11 +24,12 @@
 import logging
 import json
 import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
 import os
 import traceback
+from fabric.api import *
 
 
 if __name__ == "__main__":
@@ -39,76 +40,84 @@
     logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                         level=logging.DEBUG,
                         filename=local_log_filepath)
-
-    notebook_config = dict()
     try:
-        notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
-    except:
-        notebook_config['exploratory_name'] = ''
-    notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
-    notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
-    notebook_config['key_name'] = os.environ['conf_key_name']
-    notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
-    notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
-    notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
-    notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
-                                                               notebook_config['project_name'], os.environ['endpoint_name'],
-                                                               notebook_config['exploratory_name'])
-    notebook_config['image_enabled'] = os.environ['conf_image_enabled']
-    notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
-    if notebook_config['shared_image_enabled'] == 'false':
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
-            os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "project_tag": notebook_config['project_tag'],
-                                           "product": "dlab"}
-    else:
-        notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
-            notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
-        notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
-                                           "endpoint_tag": notebook_config['endpoint_tag'],
-                                           "product": "dlab"}
-    # generating variables regarding EDGE proxy on Notebook instance
-    instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
-                                                   notebook_config['project_name'], notebook_config['endpoint_tag'])
-    edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
-    edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
-    notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-    notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
-    notebook_config['zone'] = os.environ['gcp_zone']
+        GCPMeta = dlab.meta_lib.GCPMeta()
+        GCPActions = dlab.actions_lib.GCPActions()
+        notebook_config = dict()
+        try:
+            notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+        except:
+            notebook_config['exploratory_name'] = ''
+        notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+        notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+        notebook_config['key_name'] = os.environ['conf_key_name']
+        notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+        notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+        notebook_config['project_tag'] = notebook_config['project_name']
+        notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+        notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+        notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+                                                                       notebook_config['project_name'],
+                                                                       notebook_config['endpoint_name'],
+                                                                       notebook_config['exploratory_name'])
+        notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+        if notebook_config['shared_image_enabled'] == 'false':
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+                os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "project_tag": notebook_config['project_tag'],
+                                               "product": "dlab"}
+        else:
+            notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+                notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+            notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+                                               "endpoint_tag": notebook_config['endpoint_tag'],
+                                               "product": "dlab"}
+        # generating variables regarding EDGE proxy on Notebook instance
+        instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+                                                       notebook_config['project_name'],
+                                                       notebook_config['endpoint_name'])
+        edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+        edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+        notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+        notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+        notebook_config['zone'] = os.environ['gcp_zone']
+        notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
 
     try:
         if os.environ['conf_os_family'] == 'debian':
-            initial_user = 'ubuntu'
-            sudo_group = 'sudo'
+            notebook_config['initial_user'] = 'ubuntu'
+            notebook_config['sudo_group'] = 'sudo'
         if os.environ['conf_os_family'] == 'redhat':
-            initial_user = 'ec2-user'
-            sudo_group = 'wheel'
+            notebook_config['initial_user'] = 'ec2-user'
+            notebook_config['sudo_group'] = 'wheel'
 
         logging.info('[CREATING DLAB SSH USER]')
         print('[CREATING DLAB SSH USER]')
-        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
-            (instance_hostname, notebook_config['ssh_key_path'], initial_user,
-             notebook_config['dlab_ssh_user'], sudo_group)
-
+        params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+            instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+            notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
         try:
             local("~/scripts/{}.py {}".format('create_ssh_user', params))
         except:
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed creating ssh user 'dlab'.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # configuring proxy on Notebook instance
@@ -125,9 +134,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure proxy.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure proxy.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # updating repositories & installing python packages
@@ -143,9 +151,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing apps: apt & pip.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     # installing and configuring zeppelin and all dependencies
@@ -179,9 +186,8 @@
             traceback.print_exc()
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to configure zeppelin.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -195,12 +201,11 @@
         try:
             local("~/scripts/{}.py {}".format('install_user_key', params))
         except:
-            append_result("Failed installing users key")
+            dlab.fab.append_result("Failed installing user's key")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed installing users key.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed installing user's key.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
     try:
@@ -212,36 +217,34 @@
             local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
             local("~/scripts/{}.py {}".format('manage_git_creds', params))
         except:
-            append_result("Failed setup git credentials")
+            dlab.fab.append_result("Failed to set up git credentials")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to setup git credentials.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set up git credentials.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
-
     if notebook_config['image_enabled'] == 'true':
         try:
             print('[CREATING IMAGE]')
-            primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+            primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
             if primary_image_id == '':
                 print("Looks like it's first time we configure notebook server. Creating images.")
-                image_id_list = GCPActions().create_image_from_instance_disks(
+                image_id_list = GCPActions.create_image_from_instance_disks(
                     notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
                     notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
                 if image_id_list and image_id_list[0] != '':
                     print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
                 else:
-                    print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creation operation for your template has been started a "
+                          "moment ago.")
                 if image_id_list and image_id_list[1] != '':
                     print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
         except Exception as err:
-            print('Error: {0}'.format(err))
-            append_result("Failed creating image.", str(err))
-            GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
-            GCPActions().remove_image(notebook_config['expected_primary_image_name'])
-            GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+            dlab.fab.append_result("Failed creating image.", str(err))
+            GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+            GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+            GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
             sys.exit(1)
 
     try:
@@ -266,51 +269,53 @@
         try:
             local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
         except:
-            append_result("Failed edge reverse proxy template")
+            dlab.fab.append_result("Failed to set edge reverse proxy template")
             raise Exception
     except Exception as err:
-        print('Error: {0}'.format(err))
-        append_result("Failed to set edge reverse proxy template.", str(err))
-        GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
         sys.exit(1)
 
+    try:
+        # generating output information
+        ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+        zeppelin_ip_url = "http://" + ip_address + ":8080/"
+        ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+        zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+            notebook_config['exploratory_name'])
+        zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+            notebook_config['exploratory_name'])
+        print('[SUMMARY]')
+        logging.info('[SUMMARY]')
+        print("Instance name: {}".format(notebook_config['instance_name']))
+        print("Private IP: {}".format(ip_address))
+        print("Instance type: {}".format(notebook_config['instance_type']))
+        print("Key name: {}".format(notebook_config['key_name']))
+        print("User key name: {}".format(os.environ['project_name']))
+        print("Zeppelin URL: {}".format(zeppelin_ip_url))
+        print("Ungit URL: {}".format(ungit_ip_url))
+        print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+            notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
 
-    # generating output information
-    ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
-    zeppelin_ip_url = "http://" + ip_address + ":8080/"
-    ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
-    zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
-        notebook_config['exploratory_name'])
-    zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
-        notebook_config['exploratory_name'])
-    print('[SUMMARY]')
-    logging.info('[SUMMARY]')
-    print("Instance name: {}".format(notebook_config['instance_name']))
-    print("Private IP: {}".format(ip_address))
-    print("Instance type: {}".format(notebook_config['instance_type']))
-    print("Key name: {}".format(notebook_config['key_name']))
-    print("User key name: {}".format(os.environ['project_name']))
-    print("Zeppelin URL: {}".format(zeppelin_ip_url))
-    print("Ungit URL: {}".format(ungit_ip_url))
-    print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
-                                                                                       notebook_config['dlab_ssh_user'],
-                                                                                       ip_address))
-
-    with open("/root/result.json", 'w') as result:
-        res = {"hostname": ip_address,
-               "ip": ip_address,
-               "instance_id": notebook_config['instance_name'],
-               "master_keyname": os.environ['conf_key_name'],
-               "notebook_name": notebook_config['instance_name'],
-               "Action": "Create new notebook server",
-               "exploratory_url": [
-                   {"description": "Apache Zeppelin",
-                    "url": zeppelin_notebook_access_url},
-                   {"description": "Ungit",
-                    "url": zeppelin_ungit_access_url}#,
-                   #{"description": "Apache Zeppelin (via tunnel)",
-                   # "url": zeppelin_ip_url},
-                   #{"description": "Ungit (via tunnel)",
-                   # "url": ungit_ip_url}
-               ]}
-        result.write(json.dumps(res))
\ No newline at end of file
+        with open("/root/result.json", 'w') as result:
+            res = {"hostname": ip_address,
+                   "ip": ip_address,
+                   "instance_id": notebook_config['instance_name'],
+                   "master_keyname": os.environ['conf_key_name'],
+                   "notebook_name": notebook_config['instance_name'],
+                   "Action": "Create new notebook server",
+                   "exploratory_url": [
+                       {"description": "Apache Zeppelin",
+                        "url": zeppelin_notebook_access_url},
+                       {"description": "Ungit",
+                        "url": zeppelin_ungit_access_url}#,
+                       #{"description": "Apache Zeppelin (via tunnel)",
+                       # "url": zeppelin_ip_url},
+                       #{"description": "Ungit (via tunnel)",
+                       # "url": ungit_ip_url}
+                   ]}
+            result.write(json.dumps(res))
+    except Exception as err:
+        dlab.fab.append_result("Failed to generate output information", str(err))
+        GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        sys.exit(1)
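
Two patterns recur in the hunks above: error reporting moves from a bare append_result to the module-qualified dlab.fab.append_result, and GCPActions is invoked at class level instead of being instantiated per call. A minimal sketch of the resulting report / clean up / exit pattern, with stand-in stubs for the DLab helpers (the stubs and the fail() wrapper are illustrative, not part of the patch):

import sys

class GCPActions:                                  # stub for dlab.actions_lib.GCPActions
    @staticmethod
    def remove_instance(name, zone):
        print('removing instance {} in zone {}'.format(name, zone))

def append_result(message, error=''):              # stub for dlab.fab.append_result
    print(message, error)

def fail(notebook_config, message, err):
    # Every except-block above follows this shape: record the failure,
    # tear down the half-built instance, then exit non-zero.
    append_result(message, str(err))
    GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
    sys.exit(1)
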
diff --git a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
index 622297a..ef8f4f8 100644
--- a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
@@ -51,6 +51,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_jupyter():
     try:
         sudo('systemctl stop jupyter-notebook')
@@ -67,6 +68,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_zeppelin():
     try:
         sudo('systemctl stop zeppelin-notebook')
@@ -81,6 +83,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_rstudio():
     try:
         remove_os_pkg(['rstudio-server'])
@@ -90,6 +93,7 @@
         print('Error:', str(err))
         sys.exit(1)
 
+
 def clean_tensor():
     try:
         clean_jupyter()
@@ -100,6 +104,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 def clean_tensor_rstudio():
     try:
         clean_rstudio()
@@ -110,6 +115,7 @@
         print('Error: {0}'.format(err))
         sys.exit(1)
 
+
 if __name__ == "__main__":
     print('Configure connections')
     env['connection_attempts'] = 100
@@ -117,19 +123,19 @@
     env.host_string = args.os_user + '@' + args.hostname
 
     if os.environ['conf_cloud_provider'] == 'azure':
-         de_master_name = '{}-{}-de-{}-{}-m'.format(
-            os.environ['conf_service_base_name'],
-            os.environ['project_name'].replace("_", "-"),
-            os.environ['exploratory_name'].replace("_", "-"),
-            os.environ['computational_name'].replace("_", "-"))
-         de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
-            de_master_name)
-         default_ami_id = 'default'
-    else:
-        de_master_name = '{}-{}-de-{}-{}-m'.format(
+        de_master_name = '{}-{}-{}-de-{}-m'.format(
             os.environ['conf_service_base_name'],
             os.environ['project_name'],
-            os.environ['exploratory_name'],
+            os.environ['endpoint_name'],
+            os.environ['computational_name'])
+        de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
+                                                   de_master_name)
+        default_ami_id = 'default'
+    else:
+        de_master_name = '{}-{}-{}-de-{}-m'.format(
+            os.environ['conf_service_base_name'],
+            os.environ['project_name'],
+            os.environ['endpoint_name'],
             os.environ['computational_name'])
         de_ami_id = get_ami_id_by_instance_name(de_master_name)
         default_ami_id = get_ami_id(
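
For reference, the renamed data-engine master now encodes the endpoint as well; a small sketch with made-up environment values (the real ones come from os.environ in the script above):

# Hypothetical values; in the script they are read from os.environ.
conf_service_base_name = 'dlab'
project_name = 'project1'
endpoint_name = 'endpoint1'
computational_name = 'spark1'

de_master_name = '{}-{}-{}-de-{}-m'.format(
    conf_service_base_name, project_name, endpoint_name, computational_name)
print(de_master_name)  # dlab-project1-endpoint1-de-spark1-m
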
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
index 089e316..f8729f1 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
@@ -63,7 +63,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
index 0cc5f6e..9f2b18b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
@@ -46,7 +46,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['master_ip'] = get_instance_private_ip_address(
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
index 006a313..425b12b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
@@ -63,7 +63,7 @@
         try:
             data_engine['os_user'] = os.environ['conf_os_user']
             data_engine['service_base_name'] = os.environ['conf_service_base_name']
-            data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+            data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
             data_engine['cluster_name'] = os.environ['computational_id']
             data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
             data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
index 23f889f..eaf9ea5 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
@@ -45,8 +45,8 @@
         notebook_config = dict()
         notebook_config['notebook_name'] = os.environ['notebook_instance_name']
         notebook_config['os_user'] = os.environ['conf_os_user']
-        notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+        notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+        notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
         notebook_config['notebook_ip'] = get_instance_private_ip_address(
             notebook_config['tag_name'], notebook_config['notebook_name'])
         notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
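
The '-Tag' to '-tag' suffix rename above repeats across the OS scripts and the Terraform modules below, paired in several scripts with lower-casing the service base name; a quick sketch with a made-up value:

conf_service_base_name = 'DLab-SBN'             # hypothetical environment value
service_base_name = conf_service_base_name.lower()
tag_name = service_base_name + '-tag'           # previously '-Tag'
print(tag_name)                                 # dlab-sbn-tag
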
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
index 59074f8..5e51179 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
@@ -44,8 +44,8 @@
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
             notebook_config['resource_type'] = os.environ['conf_resource']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
index 17abe27..b56157a 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
@@ -47,8 +47,8 @@
         try:
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
index 839f3f9..820c818 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
@@ -46,8 +46,8 @@
         try:
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
-            notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
index 849333c..8bc607c 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
@@ -48,7 +48,7 @@
             notebook_config['notebook_name'] = os.environ['notebook_instance_name']
             notebook_config['os_user'] = os.environ['conf_os_user']
             notebook_config['service_base_name'] = os.environ['conf_service_base_name']
-            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+            notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
             notebook_config['notebook_ip'] = get_instance_private_ip_address(
                 notebook_config['tag_name'], notebook_config['notebook_name'])
             notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
index 90f0e5f..ff3e46d 100644
--- a/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
+++ b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
@@ -40,4 +40,4 @@
 keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
 
 # Restarting service
-supervisorctl restart provserv
\ No newline at end of file
+supervisorctl restart all
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
index 3cfa0ac..94ad123 100644
--- a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
+++ b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
@@ -38,7 +38,7 @@
 parser.add_argument('--os_user', type=str, default='')
 parser.add_argument('--scala_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -134,7 +134,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
index 5f56ea8..1486ff3 100644
--- a/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
@@ -39,7 +39,7 @@
 parser.add_argument('--os_user', type=str, default='')
 parser.add_argument('--scala_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 args = parser.parse_args()
 
@@ -113,4 +113,4 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
\ No newline at end of file
+    install_inactivity_checker(args.os_user, args.ip_address)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/project/fabfile.py b/infrastructure-provisioning/src/project/fabfile.py
index 5949469..385704e 100644
--- a/infrastructure-provisioning/src/project/fabfile.py
+++ b/infrastructure-provisioning/src/project/fabfile.py
@@ -45,13 +45,6 @@
         append_result("Failed preparing Project.", str(err))
         sys.exit(1)
 
-#    try:
-#        local("~/scripts/{}.py".format('edge_prepare'))
-#    except Exception as err:
-#        traceback.print_exc()
-#        append_result("Failed preparing Edge node.", str(err))
-#        sys.exit(1)
-
     try:
         local("~/scripts/{}.py".format('edge_configure'))
     except Exception as err:
diff --git a/infrastructure-provisioning/src/project/scripts/configure_keycloak.py b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
index a40bad2..80e7501 100644
--- a/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
+++ b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
@@ -65,10 +65,11 @@
         keycloak_client_name = "{0}-{1}-{2}".format(args.service_base_name, args.project_name, args.endpoint_name)
         keycloak_client_id = str(uuid.uuid4())
         if args.hostname == '':
-            keycloak_redirectUris = 'https://{0}/*,http://{0}/*'.format(args.edge_public_ip).split(',')
+            keycloak_redirectUris = 'https://{0}/*,http://{0}/*'.format(args.edge_public_ip).lower().split(',')
             print(keycloak_redirectUris)
         else:
-            keycloak_redirectUris = 'https://{0}/*,http://{0}/*,https://{1}/*,http://{1}/*'.format(args.edge_public_ip, args.hostname).split(',')
+            keycloak_redirectUris = 'https://{0}/*,http://{0}/*,https://{1}/*,http://{1}/*'.format(
+                args.edge_public_ip, args.hostname).lower().split(',')
         keycloak_client_data = {
             "clientId": keycloak_client_name,
             "id": keycloak_client_id,
diff --git a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
index a2878b8..34fb007 100644
--- a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
+++ b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
@@ -40,7 +40,7 @@
 parser.add_argument('--rstudio_pass', type=str, default='')
 parser.add_argument('--rstudio_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -113,7 +113,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress, True)
+    install_inactivity_checker(args.os_user, args.ip_address, True)
 
     # POST INSTALLATION PROCESS
     print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
index 58cadb3..305c46b 100644
--- a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
+++ b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
@@ -1,7 +1,7 @@
 [
   {
     "_id": "nbShapes_Standard_NC6_fetching",
-    "description": "Allow to use Standard_NC6 instance shape for notebook",
+    "description": "Use Standard_NC6 instance shape for notebook",
     "exploratory_shapes": [
       "Standard_NC6"
     ],
diff --git a/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
index a156f2f..7a1a359 100644
--- a/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
+++ b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
@@ -42,7 +42,7 @@
 parser.add_argument('--edge_instance_private_ip', type=str, default='')
 parser.add_argument('--edge_instance_public_ip', type=str, default='')
 parser.add_argument('--superset_name', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 args = parser.parse_args()
 
 gitlab_certfile = os.environ['conf_gitlab_certfile']
@@ -81,7 +81,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # PREPARE SUPERSET
     try:
diff --git a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
index fb579ad..93d8b55 100644
--- a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
+++ b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
@@ -40,7 +40,7 @@
 parser.add_argument('--rstudio_pass', type=str, default='')
 parser.add_argument('--rstudio_version', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -131,7 +131,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # POST INSTALLATION PROCESS
     print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
index 43e220e..b595d9e 100644
--- a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
+++ b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
@@ -37,7 +37,7 @@
 parser.add_argument('--keyfile', type=str, default='')
 parser.add_argument('--region', type=str, default='')
 parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -134,7 +134,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
index 9a8be88..fda8b1f 100644
--- a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
+++ b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
@@ -50,7 +50,7 @@
 parser.add_argument('--multiple_clusters', type=str, default='')
 parser.add_argument('--r_mirror', type=str, default='')
 parser.add_argument('--endpoint_url', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
 parser.add_argument('--exploratory_name', type=str, default='')
 parser.add_argument('--edge_ip', type=str, default='')
 args = parser.parse_args()
@@ -256,7 +256,7 @@
 
     # INSTALL INACTIVITY CHECKER
     print("Install inactivity checker")
-    install_inactivity_checker(args.os_user, args.ip_adress)
+    install_inactivity_checker(args.os_user, args.ip_address)
 
     # INSTALL OPTIONAL PACKAGES
     if os.environ['notebook_r_enabled'] == 'true':
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
index 1c7117f..aac7afb 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
@@ -28,7 +28,7 @@
   source_instance_id = var.source_instance_id
   tags {
     Name             = local.ami_name
-    "${var.sbn}-Tag" = local.ami_name
+    "${var.sbn}-tag" = local.ami_name
     Product          = var.product
     Project_name     = var.project_name
     Project_tag      = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
index 6624f30..259bb6c 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
@@ -46,7 +46,7 @@
   tags = {
     Name             = local.role_name
     Environment_tag  = var.sbn
-    "${var.sbn}-Tag" = local.role_name
+    "${var.sbn}-tag" = local.role_name
     Product          = var.product
     Project_name     = var.project_name
     Project_tag      = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
index 297cf28..2b3c1fb 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
@@ -30,7 +30,7 @@
 
   tags = {
     Name             = local.subnet_name
-    "${var.sbn}-Tag" = local.subnet_name
+    "${var.sbn}-tag" = local.subnet_name
     Product          = var.product
     Project_name     = var.project_name
     Project_tag      = var.project_tag
@@ -68,7 +68,7 @@
 
   tags = {
     Name             = local.sg_name
-    "${var.sbn}-Tag" = local.sg_name
+    "${var.sbn}-tag" = local.sg_name
     Product          = var.product
     Project_name     = var.project_name
     Project_tag      = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
index 7601e35..12532c1 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
@@ -35,7 +35,7 @@
     Name                     = "${local.cluster_name}-m"
     Type                     = "master"
     dataengine_notebook_name = local.notebook_name
-    "${var.sbn}-Tag"         = "${local.cluster_name}-m"
+    "${var.sbn}-tag"         = "${local.cluster_name}-m"
     Product                  = var.product
     Project_name             = var.project_name
     Project_tag              = var.project_tag
@@ -59,7 +59,7 @@
     Name                     = "${local.cluster_name}-s${count.index + 1}"
     Type                     = "slave"
     dataengine_notebook_name = local.notebook_name
-    "${var.sbn}-Tag"         = "${local.cluster_name}-s${count.index + 1}"
+    "${var.sbn}-tag"         = "${local.cluster_name}-s${count.index + 1}"
     Product                  = var.product
     Project_name             = var.project_name
     Project_tag              = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
index 4a03b2d..10f5506 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
@@ -63,7 +63,7 @@
     Name                     = local.cluster_name
     Notebook                 = local.notebook_name
     Product                  = var.product
-    "${var.sbn}-Tag"         = local.cluster_name
+    "${var.sbn}-tag"         = local.cluster_name
     Project_name             = var.project_name
     Project_tag              = var.project_tag
     User_tag                 = var.user_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
index 374d6da..64d1d4f 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
@@ -32,7 +32,7 @@
   iam_instance_profile = var.iam_profile_name
   tags = {
     Name             = local.node_name
-    "${var.sbn}-Tag" = local.node_name
+    "${var.sbn}-tag" = local.node_name
     Project_name     = var.project_name
     Project_tag      = var.project_tag
     Endpoint_Tag     = var.endpoint_tag
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
new file mode 100644
index 0000000..8a930e0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
@@ -0,0 +1,37 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_s3_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+}
+
+resource "aws_s3_bucket" "shared_bucket" {
+  bucket = local.shared_s3_name
+  acl    = "private"
+  tags   = {
+    Name                           = local.shared_s3_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.tag_resource_id}"       = "${var.service_base_name}:${local.shared_s3_name}"
+    "${var.service_base_name}-tag" = local.shared_s3_name
+    "endpoint_tag"                 = var.endpoint_id
+  }
+  force_destroy = true
+}
\ No newline at end of file
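
The new shared bucket composes its name from the service base name and endpoint id; with made-up values for the two Terraform variables:

service_base_name = 'dlab-sbn'      # hypothetical var.service_base_name
endpoint_id = 'endpoint1'           # hypothetical var.endpoint_id
shared_s3_name = '{}-{}-shared-bucket'.format(service_base_name, endpoint_id)
print(shared_s3_name)               # dlab-sbn-endpoint1-shared-bucket
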
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
index 49d2353..e4c1e69 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
@@ -36,7 +36,7 @@
     Name = local.endpoint_role_name
     "${local.additional_tag[0]}" = local.additional_tag[1]
     "${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_role_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_role_name
+    "${var.service_base_name}-tag" = local.endpoint_role_name
   }
 }
 
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
index 5c023f5..6bfc09b 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
@@ -39,7 +39,7 @@
     Name                           = local.endpoint_instance_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_instance_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_instance_name
+    "${var.service_base_name}-tag" = local.endpoint_instance_name
     endpoint_id                    = var.endpoint_id
   }
 }
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
index 15dceaa..eea071b 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
@@ -22,10 +22,10 @@
 locals {
   endpoint_subnet_name       = "${var.service_base_name}-${var.endpoint_id}-subnet"
   endpoint_sg_name           = "${var.service_base_name}-${var.endpoint_id}-sg"
-  endpoint_vpc_name          = "${var.service_base_name}-endpoint-vpc"
+  endpoint_vpc_name          = "${var.service_base_name}-${var.endpoint_id}-vpc"
   additional_tag             = split(":", var.additional_tag)
   endpoint_igw_name          = "${var.service_base_name}-${var.endpoint_id}-igw"
-  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-eip"
+  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-static-ip"
   projects_rt                = "${var.service_base_name}-${var.endpoint_id}-project-rt"
 }
 
@@ -40,7 +40,7 @@
     Name                              = local.endpoint_vpc_name
     "${local.additional_tag[0]}"      = local.additional_tag[1]
     "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_vpc_name}"
-    "${var.service_base_name}-Tag"    = local.endpoint_vpc_name
+    "${var.service_base_name}-tag"    = local.endpoint_vpc_name
   }
 }
 
@@ -55,7 +55,7 @@
     Name                           = local.endpoint_igw_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_igw_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_igw_name
+    "${var.service_base_name}-tag" = local.endpoint_igw_name
   }
 }
 
@@ -67,7 +67,7 @@
     Name                           = local.endpoint_subnet_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_subnet_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_subnet_name
+    "${var.service_base_name}-tag" = local.endpoint_subnet_name
   }
   count = var.vpc_id == "" ? 1 : 0
 }
@@ -132,7 +132,7 @@
     Name                           = local.endpoint_sg_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_sg_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_sg_name
+    "${var.service_base_name}-tag" = local.endpoint_sg_name
   }
 }
 
@@ -142,7 +142,7 @@
     Name                           = local.endpoint_ip_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.endpoint_ip_name}"
-    "${var.service_base_name}-Tag" = local.endpoint_ip_name
+    "${var.service_base_name}-tag" = local.endpoint_ip_name
   }
 }
 
@@ -152,8 +152,8 @@
     Name                           = local.projects_rt
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.projects_rt}"
-    "${var.service_base_name}-Tag" = local.projects_rt
-    "${var.service_base_name}-Tag" = var.service_base_name
+    "${var.service_base_name}-tag" = local.projects_rt
+    "${var.service_base_name}-tag" = var.service_base_name
   }
 }
 
diff --git a/infrastructure-provisioning/terraform/aws/project/main/iam.tf b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
index 42fc02b..5aa83e2 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
@@ -50,7 +50,7 @@
     Name = "${local.edge_role_name}"
     "${local.additional_tag[0]}" = local.additional_tag[1]
     "${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_role_name}"
-    "${var.service_base_name}-Tag" = local.edge_role_name
+    "${var.service_base_name}-tag" = local.edge_role_name
   }
 }
 
@@ -80,7 +80,7 @@
   tags = {
     Name                           = local.nb_role_name
     Environment_tag                = var.service_base_name
-    "${var.service_base_name}-Tag" = local.nb_role_name
+    "${var.service_base_name}-tag" = local.nb_role_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     Project_name                   = var.project_name
     Project_tag                    = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/project/main/instance.tf b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
index 7b4cddc..1220743 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
@@ -39,7 +39,7 @@
     Name                           = local.edge_instance_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_instance_name}"
-    "${var.service_base_name}-Tag" = local.edge_instance_name
+    "${var.service_base_name}-tag" = local.edge_instance_name
     "Endpoint_tag"                 = var.endpoint_tag
   }
 }
diff --git a/infrastructure-provisioning/terraform/aws/project/main/network.tf b/infrastructure-provisioning/terraform/aws/project/main/network.tf
index d1064cd..aac8339 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/network.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/network.tf
@@ -38,7 +38,7 @@
     Name                           = local.edge_ip_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_ip_name}"
-    "${var.service_base_name}-Tag" = local.edge_ip_name
+    "${var.service_base_name}-tag" = local.edge_ip_name
   }
 }
 
@@ -211,7 +211,7 @@
     Name                           = local.edge_sg_name
     "${local.additional_tag[0]}"   = local.additional_tag[1]
     "${var.tag_resource_id}"       = "${var.service_base_name}:${local.edge_sg_name}"
-    "${var.service_base_name}-Tag" = local.edge_sg_name
+    "${var.service_base_name}-tag" = local.edge_sg_name
   }
 }
 
@@ -225,7 +225,7 @@
 
   tags = {
     Name                         = local.nb_subnet_name
-    "${local.sbn}-Tag"           = local.nb_subnet_name
+    "${local.sbn}-tag"           = local.nb_subnet_name
     "${local.additional_tag[0]}" = local.additional_tag[1]
     Project_name                 = var.project_name
     Project_tag                  = var.project_tag
@@ -263,7 +263,7 @@
 
   tags = {
     Name                         = local.sg_name
-    "${local.sbn}-Tag"           = local.sg_name
+    "${local.sbn}-tag"           = local.sg_name
     "${local.additional_tag[0]}" = local.additional_tag[1]
     Project_name                 = var.project_name
     Project_tag                  = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
index 792c950..41dfb20 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
@@ -21,8 +21,8 @@
 
 locals {
   subnet_c_id                      = data.aws_subnet.k8s-subnet-c-data == [] ? "" : data.aws_subnet.k8s-subnet-c-data.0.id
-  ssn_k8s_launch_conf_masters_name = "${var.service_base_name}-ssn-launch-conf-masters"
-  ssn_k8s_launch_conf_workers_name = "${var.service_base_name}-ssn-launch-conf-workers"
+  ssn_k8s_launch_conf_masters_name = "${var.service_base_name}-ssn-lc-masters"
+  ssn_k8s_launch_conf_workers_name = "${var.service_base_name}-ssn-lc-workers"
   ssn_k8s_ag_masters_name          = "${var.service_base_name}-ssn-masters"
   ssn_k8s_ag_workers_name          = "${var.service_base_name}-ssn-workers"
   cluster_name                     = "${var.service_base_name}-k8s-cluster"
@@ -122,7 +122,7 @@
       propagate_at_launch = true
     },
     {
-      key                 = "${var.service_base_name}-Tag"
+      key                 = "${var.service_base_name}-tag"
       value               = local.ssn_k8s_ag_masters_name
       propagate_at_launch = true
     },
@@ -162,7 +162,7 @@
       propagate_at_launch = true
     },
     {
-      key                 = "${var.service_base_name}-Tag"
+      key                 = "${var.service_base_name}-tag"
       value               = local.ssn_k8s_ag_workers_name
       propagate_at_launch = true
     },
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
index 4000cdd..bd0baf8 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
@@ -36,7 +36,7 @@
     Name                                          = local.ssn_nlb_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_nlb_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_nlb_name
+    "${var.service_base_name}-tag"                = local.ssn_nlb_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -50,7 +50,7 @@
     Name                                          = local.ssn_k8s_nlb_api_tg_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_k8s_nlb_api_tg_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_k8s_nlb_api_tg_name
+    "${var.service_base_name}-tag"                = local.ssn_k8s_nlb_api_tg_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -64,7 +64,7 @@
     Name                                          = local.ssn_k8s_nlb_step_ca_tg_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_k8s_nlb_step_ca_tg_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_k8s_nlb_step_ca_tg_name
+    "${var.service_base_name}-tag"                = local.ssn_k8s_nlb_step_ca_tg_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
index 79c5969..e01b1d6 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
@@ -41,7 +41,7 @@
     Name                                          = local.ssn_role_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_role_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_role_name
+    "${var.service_base_name}-tag"                = local.ssn_role_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
index 622956b..f91e6ca 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
@@ -30,7 +30,7 @@
     Name                                          = local.ssn_s3_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_s3_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_s3_name
+    "${var.service_base_name}-tag"                = local.ssn_s3_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
   force_destroy = true
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
index 54c1648..b9f7fa8 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
@@ -59,7 +59,7 @@
     Name                                          = local.ssn_sg_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_sg_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_sg_name
+    "${var.service_base_name}-tag"                = local.ssn_sg_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
index 2ce9d08..699dfcd 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
@@ -21,8 +21,8 @@
 
 locals {
   additional_tag       = split(":", var.additional_tag)
-  ssn_vpc_name      = "${var.service_base_name}-ssn-vpc"
-  ssn_igw_name      = "${var.service_base_name}-ssn-igw"
+  ssn_vpc_name      = "${var.service_base_name}-vpc"
+  ssn_igw_name      = "${var.service_base_name}-igw"
   ssn_subnet_a_name = "${var.service_base_name}-ssn-subnet-az-a"
   ssn_subnet_b_name = "${var.service_base_name}-ssn-subnet-az-b"
   ssn_subnet_c_name = "${var.service_base_name}-ssn-subnet-az-c"
@@ -41,7 +41,7 @@
     Name                                          = local.ssn_vpc_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_vpc_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_vpc_name
+    "${var.service_base_name}-tag"                = local.ssn_vpc_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -54,7 +54,7 @@
     Name                                          = local.ssn_igw_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_igw_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_igw_name
+    "${var.service_base_name}-tag"                = local.ssn_igw_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -81,7 +81,7 @@
     Name                                          = local.ssn_subnet_a_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_a_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_subnet_a_name
+    "${var.service_base_name}-tag"                = local.ssn_subnet_a_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -97,7 +97,7 @@
     Name                                          = local.ssn_subnet_b_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_b_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_subnet_b_name
+    "${var.service_base_name}-tag"                = local.ssn_subnet_b_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -113,7 +113,7 @@
     Name                                          = local.ssn_subnet_c_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.ssn_subnet_c_name}"
-    "${var.service_base_name}-Tag"                = local.ssn_subnet_c_name
+    "${var.service_base_name}-tag"                = local.ssn_subnet_c_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
@@ -135,7 +135,7 @@
   vpc_id = data.aws_vpc.ssn_k8s_vpc_data.id
   tags = {
     Name                                          = local.endpoint_rt_name
-    "${var.service_base_name}-Tag"                = var.service_base_name
+    "${var.service_base_name}-tag"                = var.service_base_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.endpoint_rt_name}"
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
@@ -149,7 +149,7 @@
     Name                                          = local.endpoint_s3_name
     "${local.additional_tag[0]}"                  = local.additional_tag[1]
     "${var.tag_resource_id}"                      = "${var.service_base_name}:${local.endpoint_s3_name}"
-    "${var.service_base_name}-Tag"                = local.endpoint_s3_name
+    "${var.service_base_name}-tag"                = local.endpoint_s3_name
     "kubernetes.io/cluster/${local.cluster_name}" = "owned"
   }
 }
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
index 470e474..d76a16c 100644
--- a/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
@@ -57,7 +57,7 @@
     vm_size               = var.master_shape
 
     storage_os_disk {
-        name              = "${local.cluster_name}-m-disk0"
+        name              = "${local.cluster_name}-m-volume-primary"
         caching           = "ReadWrite"
         create_option     = "FromImage"
         managed_disk_type = "Premium_LRS"
@@ -129,7 +129,7 @@
     vm_size               = var.slave_shape
 
     storage_os_disk {
-        name              = "${local.notebook_name}-s-${count.index + 1}-disk0"
+        name              = "${local.notebook_name}-s-${count.index + 1}-volume-primary"
         caching           = "ReadWrite"
         create_option     = "FromImage"
         managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
index 0283038..70e1db5 100644
--- a/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
@@ -58,7 +58,7 @@
     vm_size               = var.instance_type
 
     storage_os_disk {
-        name              = "${local.node_name}-disk0"
+        name              = "${local.node_name}-volume-primary"
         caching           = "ReadWrite"
         create_option     = "FromImage"
         managed_disk_type = "Premium_LRS"
@@ -106,7 +106,7 @@
     vm_size               = var.instance_type
 
     storage_os_disk {
-        name              = "${local.node_name}-disk0"
+        name              = "${local.node_name}-volume-primary"
         caching           = "ReadWrite"
         create_option     = "FromImage"
         managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
new file mode 100644
index 0000000..a44a37f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
@@ -0,0 +1,53 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_bucket_name = lower("${var.service_base_name}-${var.endpoint_id}-shared-bucket")
+}
+
+resource "random_string" "shared_bucket_service_name" {
+  length  = 10
+  special = false
+  lower   = true
+  upper   = false
+}
+
+resource "azurerm_storage_account" "shared-endpoint-storage-account" {
+  name                     = random_string.shared_bucket_service_name.result
+  resource_group_name      = data.azurerm_resource_group.data-endpoint-resource-group.name
+  location                 = data.azurerm_resource_group.data-endpoint-resource-group.location
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  account_kind             = "BlobStorage"
+
+  tags = {
+    Name                              = local.shared_bucket_name
+    "${local.additional_tag[0]}"      = local.additional_tag[1]
+    "${var.service_base_name}-tag"    = local.shared_bucket_name
+    "endpoint_tag"                    = var.endpoint_id
+  }
+}
+
+resource "azurerm_storage_container" "shared-endpoint-storage-container" {
+  name                  = local.shared_bucket_name
+  storage_account_name  = azurerm_storage_account.shared-endpoint-storage-account.name
+  container_access_type = "private"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
index 989c32c..82c1497 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
@@ -21,7 +21,7 @@
 
 locals {
   endpoint_instance_name      = "${var.service_base_name}-${var.endpoint_id}-endpoint"
-  endpoint_instance_disk_name = "${var.service_base_name}-${var.endpoint_id}-endpoint-disk"
+  endpoint_instance_disk_name = "${var.service_base_name}-${var.endpoint_id}-endpoint-volume"
 }
 
 data "tls_public_key" "enpoint_key" {
@@ -65,6 +65,6 @@
     Name                              = local.endpoint_instance_name
     "${local.additional_tag[0]}"      = local.additional_tag[1]
     "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_instance_name}"
-    "${var.service_base_name}-Tag"    = local.endpoint_instance_name
+    "${var.service_base_name}-tag"    = local.endpoint_instance_name
   }
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
index b6ed11e..cbf2187 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
@@ -20,7 +20,8 @@
 # ******************************************************************************
 
 locals {
-  json_data = jsondecode(file(var.auth_file_path))
+  resource_group_name = "${var.service_base_name}-${var.endpoint_id}-resource-group"
+  json_data           = jsondecode(file(var.auth_file_path))
 }
 
 provider "azurerm" {
@@ -32,8 +33,8 @@
 }
 
 resource "azurerm_resource_group" "endpoint-resource-group" {
-  count  = var.resource_group_name == "" ? 1 : 0
-  name     = var.service_base_name
+  count    = var.resource_group_name == "" ? 1 : 0
+  name     = local.resource_group_name
   location = var.region
 
   tags = {
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
index 520b221..738f062 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
@@ -21,9 +21,9 @@
 
 locals {
   endpoint_subnet_name       = "${var.service_base_name}-${var.endpoint_id}-subnet"
-  endpoint_vpc_name          = "${var.service_base_name}-endpoint-vpc"
+  endpoint_vpc_name          = "${var.service_base_name}-${var.endpoint_id}-vpc"
   additional_tag             = split(":", var.additional_tag)
-  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-eip"
+  endpoint_ip_name           = "${var.service_base_name}-${var.endpoint_id}-static-ip"
   endpoint_nif_name          = "${var.service_base_name}-${var.endpoint_id}-nif"
 }
 
@@ -38,7 +38,7 @@
     Name                              = local.endpoint_vpc_name
     "${local.additional_tag[0]}"      = local.additional_tag[1]
     "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_vpc_name}"
-    "${var.service_base_name}-Tag"    = local.endpoint_vpc_name
+    "${var.service_base_name}-tag"    = local.endpoint_vpc_name
   }
 }
 
@@ -71,7 +71,7 @@
     Name                              = local.endpoint_ip_name
     "${local.additional_tag[0]}"      = local.additional_tag[1]
     "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_ip_name}"
-    "${var.service_base_name}-Tag"    = local.endpoint_ip_name
+    "${var.service_base_name}-tag"    = local.endpoint_ip_name
   }
 }
 
@@ -92,7 +92,7 @@
     Name                              = local.endpoint_nif_name
     "${local.additional_tag[0]}"      = local.additional_tag[1]
     "${var.tag_resource_id}"          = "${var.service_base_name}:${local.endpoint_nif_name}"
-    "${var.service_base_name}-Tag"    = local.endpoint_nif_name
+    "${var.service_base_name}-tag"    = local.endpoint_nif_name
   }
 }
 
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
index 793917d..c005b29 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
@@ -33,4 +33,12 @@
 
 output "ssn_k8s_sg_id" {
   value = azurerm_network_security_group.enpoint-sg.name
+}
+
+output "endpoint_id" {
+  value = var.endpoint_id
+}
+
+output "resource_group_name" {
+  value = data.azurerm_resource_group.data-endpoint-resource-group.name
 }
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/project/main/instance.tf b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
index d7ec3aa..34cd26b 100644
--- a/infrastructure-provisioning/terraform/azure/project/main/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
@@ -59,7 +59,7 @@
     vm_size               = var.instance_type
 
     storage_os_disk {
-        name              = "${local.node_name}-disk0"
+        name              = "${local.node_name}-volume-primary"
         caching           = "ReadWrite"
         create_option     = "FromImage"
         managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
index c77f9e7..f09731c 100644
--- a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
+++ b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
@@ -357,7 +357,7 @@
             conn.put('./provisioning.yml', '{}provisioning.yml'
                      .format(dlab_conf_dir))
             if args.resource_group_name == '':
-                args.resource_group_name = args.service_base_name
+                args.resource_group_name = '{}-{}-resource-group'.format(args.service_base_name, args.endpoint_id)
             if args.cloud_provider == 'azure':
                 args.region = args.region.lower().replace(' ', '')
             cloud_properties = [
@@ -951,6 +951,7 @@
     parser.add_argument('--dlab_path', type=str, default='/opt/dlab')
     parser.add_argument('--key_name', type=str, default='', help='Name of admin key without .pem extension')
     parser.add_argument('--endpoint_eip_address', type=str)
+    parser.add_argument('--endpoint_id', type=str, default='')
     parser.add_argument('--pkey', type=str, default='')
     parser.add_argument('--hostname', type=str, default='')
     parser.add_argument('--os_user', type=str, default='dlab-user')
diff --git a/infrastructure-provisioning/terraform/bin/dlab.py b/infrastructure-provisioning/terraform/bin/dlab.py
index 8129a8e..60af3b3 100644
--- a/infrastructure-provisioning/terraform/bin/dlab.py
+++ b/infrastructure-provisioning/terraform/bin/dlab.py
@@ -475,8 +475,8 @@
 
     def validate_params(self):
         params = self.parse_args()[self.terraform_args_group_name]
-        if len(params.get('service_base_name')) > 12:
-            sys.stderr.write('service_base_name length should be less then 12')
+        if len(params.get('service_base_name')) > 20:
+            sys.stderr.write('service_base_name length should not exceed 20 characters')
             sys.exit(1)
         if not re.match("^[a-z0-9\-]+$", params.get('service_base_name')):
             sys.stderr.write('service_base_name should contain only lowercase '
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
new file mode 100644
index 0000000..9551d65
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
@@ -0,0 +1,36 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+  shared_bucket_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+  additional_tag     = split(":", var.additional_tag)
+}
+
+resource "google_storage_bucket" "shared_bucket" {
+  name          = local.shared_bucket_name
+  force_destroy = true
+  labels = {
+    name                           = local.shared_bucket_name
+    "${local.additional_tag[0]}"   = local.additional_tag[1]
+    "${var.service_base_name}-tag" = local.shared_bucket_name
+    "endpoint_tag"                 = var.endpoint_id
+  }
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
index 1cf97f3..65becf8 100644
--- a/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
@@ -22,8 +22,8 @@
 locals {
   vpc_id                = "${var.service_base_name}-${var.endpoint_id}-vpc"
   subnet_name           = "${var.service_base_name}-${var.endpoint_id}-subnet"
-  firewall_ingress_name = "${var.service_base_name}-${var.endpoint_id}-ing-rule"
-  firewall_egress_name  = "${var.service_base_name}-${var.endpoint_id}-eg-rule"
+  firewall_ingress_name = "${var.service_base_name}-${var.endpoint_id}-ingress-sg"
+  firewall_egress_name  = "${var.service_base_name}-${var.endpoint_id}-egress-sg"
 }
 
 resource "google_compute_network" "endpoint_vpc" {
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
index b1d1e7d..21bdf0a 100644
--- a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
@@ -21,7 +21,7 @@
 
 locals {
   additional_tag     = split(":", var.additional_tag)
-  gke_name           = "${var.service_base_name}-cluster"
+  gke_name           = "${var.service_base_name}-k8s-cluster"
   gke_node_pool_name = "${var.service_base_name}-node-pool"
 }
 
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
index 04f7ec7..c3bbdcb 100644
--- a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
@@ -20,8 +20,8 @@
 # ******************************************************************************
 
 locals {
-  ssn_vpc_name      = "${var.service_base_name}-ssn-vpc"
-  ssn_subnet_name   = "${var.service_base_name}-ssn-subnet"
+  ssn_vpc_name      = "${var.service_base_name}-vpc"
+  ssn_subnet_name   = "${var.service_base_name}-subnet"
 }
 
 resource "google_compute_network" "ssn_gke_vpc" {
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
index 3b0ef82..117f3ee 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
+++ b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
@@ -64,7 +64,7 @@
 
 		List<String> valuesT1 = new ArrayList<>();
 		valuesT1.add(instanceName + "*");
-		Filter filter = new Filter("tag:" + NamingHelper.getServiceBaseName() + "-Tag", valuesT1);
+		Filter filter = new Filter("tag:" + NamingHelper.getServiceBaseName() + "-tag", valuesT1);
 
 		DescribeInstancesRequest describeInstanceRequest = new DescribeInstancesRequest().withFilters(filter);
 		DescribeInstancesResult describeInstanceResult = ec2.describeInstances(describeInstanceRequest);
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java b/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java
index 82d6d16..8f70083 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/controller/BillingController.java
@@ -44,7 +44,10 @@
     }
 
     @GetMapping("/report")
-    public ResponseEntity<List<BillingData>> getBilling() {
-        return new ResponseEntity<>(billingDAO.getBillingReport(), HttpStatus.OK);
+    public ResponseEntity<List<BillingData>> getBilling(@RequestParam("date-start") String dateStart,
+                                                        @RequestParam("date-end") String dateEnd,
+                                                        @RequestParam("dlab-id") String dlabId,
+                                                        @RequestParam("product") List<String> products) {
+        return new ResponseEntity<>(billingDAO.getBillingReport(dateStart, dateEnd, dlabId, products), HttpStatus.OK);
     }
 }
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/dao/BillingDAO.java b/services/billing-aws/src/main/java/com/epam/dlab/dao/BillingDAO.java
index 8ecaddf..f72fa99 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/dao/BillingDAO.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/dao/BillingDAO.java
@@ -25,7 +25,7 @@
 
 public interface BillingDAO {
 
-    List<BillingData> getBillingReport();
+    List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products);
 
     List<BillingData> getBillingReport(List<String> dlabIds);
 }
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/dao/impl/BillingDAOImpl.java b/services/billing-aws/src/main/java/com/epam/dlab/dao/impl/BillingDAOImpl.java
index 4fe4606..b0ff9f1 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/dao/impl/BillingDAOImpl.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/dao/impl/BillingDAOImpl.java
@@ -23,9 +23,11 @@
 import com.epam.dlab.dto.billing.BillingData;
 import com.epam.dlab.exceptions.DlabException;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
 import org.bson.Document;
 import org.springframework.data.mongodb.core.MongoTemplate;
 import org.springframework.data.mongodb.core.aggregation.Aggregation;
+import org.springframework.data.mongodb.core.aggregation.AggregationOperation;
 import org.springframework.data.mongodb.core.aggregation.GroupOperation;
 import org.springframework.data.mongodb.core.aggregation.MatchOperation;
 import org.springframework.data.mongodb.core.query.Criteria;
@@ -33,6 +35,7 @@
 
 import java.math.BigDecimal;
 import java.time.LocalDate;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import java.util.stream.Collectors;
@@ -56,10 +59,20 @@
     }
 
     @Override
-    public List<BillingData> getBillingReport() {
+    public List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products) {
         try {
-            GroupOperation groupOperation = getGroupOperation();
-            Aggregation aggregation = newAggregation(groupOperation);
+            List<AggregationOperation> aggregationOperations = new ArrayList<>();
+            aggregationOperations.add(Aggregation.match(Criteria.where(FIELD_DLAB_ID).regex(dlabId, "i")));
+            if (!products.isEmpty()) {
+                aggregationOperations.add(Aggregation.match(Criteria.where(FIELD_PRODUCT).in(products)));
+            }
+            getMatchCriteria(dateStart, Criteria.where(FIELD_USAGE_DATE).gte(dateStart))
+                    .ifPresent(aggregationOperations::add);
+            getMatchCriteria(dateEnd, Criteria.where(FIELD_USAGE_DATE).lte(dateEnd))
+                    .ifPresent(aggregationOperations::add);
+            aggregationOperations.add(getGroupOperation());
+
+            Aggregation aggregation = newAggregation(aggregationOperations);
 
             return mongoTemplate.aggregate(aggregation, "billing", Document.class).getMappedResults()
                     .stream()
@@ -95,6 +108,12 @@
                 .sum(FIELD_COST).as(FIELD_COST);
     }
 
+    private Optional<MatchOperation> getMatchCriteria(String date, Criteria criteria) {
+        return Optional.ofNullable(date)
+                .filter(StringUtils::isNotEmpty)
+                .map(d -> Aggregation.match(criteria));
+    }
+
     private BillingData toBillingData(Document billingData) {
         return BillingData.builder()
                 .tag(billingData.getString(FIELD_DLAB_ID))
diff --git a/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java b/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
index e0af4e6..994522d 100644
--- a/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
+++ b/services/billing-aws/src/main/java/com/epam/dlab/mongo/AdapterMongoDb.java
@@ -31,12 +31,17 @@
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.mongodb.client.MongoCollection;
+import com.mongodb.client.model.UpdateOptions;
 import org.bson.Document;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.TreeSet;
 
+import static com.epam.dlab.mongo.MongoConstants.COLLECTION_SETTINGS;
+import static com.epam.dlab.mongo.MongoConstants.FIELD_SERIVICE_BASE_NAME;
+import static com.mongodb.client.model.Filters.eq;
+
 /**
  * The adapter for file system.
  */
@@ -67,6 +72,17 @@
 	@JsonProperty
 	private boolean upsert = false;
 
+	@JsonProperty
+	private String serviceBaseName;
+
+	public String getServiceBaseName() {
+		return serviceBaseName;
+	}
+
+	public void setServiceBaseName(String serviceBaseName) {
+		this.serviceBaseName = serviceBaseName;
+	}
+
 	/**
 	 * Return the size of buffer for bulk insert.
 	 */
@@ -142,6 +158,7 @@
 				throw new AdapterException("Mode of " + getType() + " adapter may be " + Mode.WRITE + " only.");
 			}
 			connection = new MongoDbConnection(getHost(), getPort(), getDatabase(), getUsername(), getPassword());
+			setServiceBaseName();
 			collection = connection.getCollection(MongoConstants.COLLECTION_BILLING);
 			try {
 				resourceTypeDAO = new DlabResourceTypeDAO(connection);
@@ -158,6 +175,12 @@
 		}
 	}
 
+	private void setServiceBaseName() {
+		connection.getCollection(COLLECTION_SETTINGS)
+				.updateOne(eq("_id", FIELD_SERIVICE_BASE_NAME), new Document("$set", new Document("value", serviceBaseName)),
+						new UpdateOptions().upsert(true));
+	}
+
 	@Override
 	public void close() throws AdapterException {
 		if (connection != null) {
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java
index 53227a2..eb728c9 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/controller/BillingController.java
@@ -44,7 +44,10 @@
     }
 
     @GetMapping("/report")
-    public ResponseEntity<List<BillingData>> getBilling() {
-        return new ResponseEntity<>(billingDAO.getBillingReport(), HttpStatus.OK);
+    public ResponseEntity<List<BillingData>> getBilling(@RequestParam("date-start") String dateStart,
+                                                        @RequestParam("date-end") String dateEnd,
+                                                        @RequestParam("dlab-id") String dlabId,
+                                                        @RequestParam("product") List<String> products) {
+        return new ResponseEntity<>(billingDAO.getBillingReport(dateStart, dateEnd, dlabId, products), HttpStatus.OK);
     }
 }
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/BillingDAO.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/BillingDAO.java
index 1f578b1..793d7cb 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/BillingDAO.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/BillingDAO.java
@@ -25,7 +25,7 @@
 
 public interface BillingDAO {
 
-    List<BillingData> getBillingReport();
+    List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products);
 
     List<BillingData> getBillingReport(List<String> dlabIds);
 }
diff --git a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/impl/BillingDAOImpl.java b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/impl/BillingDAOImpl.java
index 2e3da13..c39385a 100644
--- a/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/impl/BillingDAOImpl.java
+++ b/services/billing-azure/src/main/java/com/epam/dlab/billing/azure/dao/impl/BillingDAOImpl.java
@@ -24,8 +24,10 @@
 import com.epam.dlab.dto.billing.BillingData;
 import com.epam.dlab.exceptions.DlabException;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
 import org.springframework.data.mongodb.core.MongoTemplate;
 import org.springframework.data.mongodb.core.aggregation.Aggregation;
+import org.springframework.data.mongodb.core.aggregation.AggregationOperation;
 import org.springframework.data.mongodb.core.aggregation.GroupOperation;
 import org.springframework.data.mongodb.core.aggregation.MatchOperation;
 import org.springframework.data.mongodb.core.query.Criteria;
@@ -33,6 +35,7 @@
 
 import java.math.BigDecimal;
 import java.time.LocalDate;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import java.util.stream.Collectors;
@@ -50,10 +53,20 @@
     }
 
     @Override
-    public List<BillingData> getBillingReport() {
+    public List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products) {
         try {
-            GroupOperation groupOperation = getGroupOperation();
-            Aggregation aggregation = newAggregation(groupOperation);
+            List<AggregationOperation> aggregationOperations = new ArrayList<>();
+            aggregationOperations.add(Aggregation.match(Criteria.where("dlabId").regex(dlabId, "i")));
+            if (!products.isEmpty()) {
+                aggregationOperations.add(Aggregation.match(Criteria.where("meterCategory").in(products)));
+            }
+            getMatchCriteria(dateStart, Criteria.where("day").gte(dateStart))
+                    .ifPresent(aggregationOperations::add);
+            getMatchCriteria(dateEnd, Criteria.where("day").lte(dateEnd))
+                    .ifPresent(aggregationOperations::add);
+            aggregationOperations.add(getGroupOperation());
+
+            Aggregation aggregation = newAggregation(aggregationOperations);
 
             return mongoTemplate.aggregate(aggregation, "billing", AzureDailyResourceInvoice.class).getMappedResults()
                     .stream()
@@ -89,6 +102,12 @@
                 .sum("cost").as("cost");
     }
 
+    private Optional<MatchOperation> getMatchCriteria(String date, Criteria criteria) {
+        return Optional.ofNullable(date)
+                .filter(StringUtils::isNotEmpty)
+                .map(d -> Aggregation.match(criteria));
+    }
+
     private BillingData toBillingData(AzureDailyResourceInvoice billingData) {
         return BillingData.builder()
                 .tag(billingData.getDlabId())
diff --git a/services/billing-azure/src/main/resources/application.yml b/services/billing-azure/src/main/resources/application.yml
index ebc4dac..eba7073 100644
--- a/services/billing-azure/src/main/resources/application.yml
+++ b/services/billing-azure/src/main/resources/application.yml
@@ -46,7 +46,7 @@
   currency: <CURRENCY>
   locale: <LOCALE>
   regionInfo: <REGION_INFO>
-  initialDelay: 10
+  initialDelay: 1
   period: 60
   aggregationOutputMongoDataSource:
     host: localhost
diff --git a/services/billing-gcp/pom.xml b/services/billing-gcp/pom.xml
index e562089..43dff3b 100644
--- a/services/billing-gcp/pom.xml
+++ b/services/billing-gcp/pom.xml
@@ -98,6 +98,11 @@
             <artifactId>dlab-model</artifactId>
             <version>${project.parent.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <version>3.7</version>
+        </dependency>
     </dependencies>
 
     <build>
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java
index 525be72..2967d2a 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/controller/BillingController.java
@@ -44,7 +44,10 @@
     }
 
     @GetMapping("/report")
-    public ResponseEntity<List<BillingData>> getBilling() {
-        return new ResponseEntity<>(billingDAO.getBillingReport(), HttpStatus.OK);
+    public ResponseEntity<List<BillingData>> getBilling(@RequestParam("date-start") String dateStart,
+                                                        @RequestParam("date-end") String dateEnd,
+                                                        @RequestParam("dlab-id") String dlabId,
+                                                        @RequestParam("product") List<String> products) {
+        return new ResponseEntity<>(billingDAO.getBillingReport(dateStart, dateEnd, dlabId, products), HttpStatus.OK);
     }
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
index 7288614..430ade7 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/BillingDAO.java
@@ -28,7 +28,7 @@
 
     List<GcpBillingData> getBillingData() throws InterruptedException;
 
-    List<BillingData> getBillingReport();
+    List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products);
 
     List<BillingData> getBillingReport(List<String> dlabIds);
 }
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
index ee917b5..6db993a 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/dao/impl/BigQueryBillingDAO.java
@@ -33,9 +33,11 @@
 import com.google.cloud.bigquery.Table;
 import com.google.cloud.bigquery.TableInfo;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.data.mongodb.core.MongoTemplate;
 import org.springframework.data.mongodb.core.aggregation.Aggregation;
+import org.springframework.data.mongodb.core.aggregation.AggregationOperation;
 import org.springframework.data.mongodb.core.aggregation.GroupOperation;
 import org.springframework.data.mongodb.core.aggregation.MatchOperation;
 import org.springframework.data.mongodb.core.query.Criteria;
@@ -46,8 +48,10 @@
 import java.time.LocalDate;
 import java.time.ZoneId;
 import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
@@ -104,10 +108,20 @@
 	}
 
 	@Override
-	public List<BillingData> getBillingReport() {
+	public List<BillingData> getBillingReport(String dateStart, String dateEnd, String dlabId, List<String> products) {
 		try {
-			GroupOperation groupOperation = getGroupOperation();
-			Aggregation aggregation = newAggregation(groupOperation);
+			List<AggregationOperation> aggregationOperations = new ArrayList<>();
+			aggregationOperations.add(Aggregation.match(Criteria.where("dlabId").regex(dlabId, "i")));
+			if (!products.isEmpty()) {
+				aggregationOperations.add(Aggregation.match(Criteria.where("product").in(products)));
+			}
+			getMatchCriteria(dateStart, Criteria.where("usage_date").gte(dateStart))
+					.ifPresent(aggregationOperations::add);
+			getMatchCriteria(dateEnd, Criteria.where("usage_date").lte(dateEnd))
+					.ifPresent(aggregationOperations::add);
+			aggregationOperations.add(getGroupOperation());
+
+			Aggregation aggregation = newAggregation(aggregationOperations);
 
 			return mongoTemplate.aggregate(aggregation, "billing", GcpBillingData.class).getMappedResults()
 					.stream()
@@ -137,12 +151,18 @@
 	}
 
 	private GroupOperation getGroupOperation() {
-		return group("product", "currency", "usageType", "dlabId")
+		return group("product", "currency", "dlabId")
 				.min("from").as("from")
 				.max("to").as("to")
 				.sum("cost").as("cost");
 	}
 
+	private Optional<MatchOperation> getMatchCriteria(String date, Criteria criteria) {
+		return Optional.ofNullable(date)
+				.filter(StringUtils::isNotEmpty)
+				.map(d -> Aggregation.match(criteria));
+	}
+
 	private Stream<? extends GcpBillingData> bigQueryResultSetStream(Table table) {
 		try {
 			final String tableName = table.getTableId().getTable();
@@ -166,7 +186,7 @@
 		return GcpBillingData.builder()
 				.usageDateFrom(toLocalDate(fields, "usage_date_from"))
 				.usageDateTo(toLocalDate(fields, "usage_date_to"))
-				.cost(fields.get("cost").getNumericValue().setScale(3, BigDecimal.ROUND_HALF_UP))
+				.cost(fields.get("cost").getNumericValue().doubleValue())
 				.product(fields.get("product").getStringValue())
 				.usageType(fields.get("usageType").getStringValue())
 				.currency(fields.get("currency").getStringValue())
@@ -186,7 +206,7 @@
 				.usageDateTo(billingData.getUsageDateTo())
 				.product(billingData.getProduct())
 				.usageType(billingData.getUsageType())
-				.cost(billingData.getCost().setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue())
+				.cost(BigDecimal.valueOf(billingData.getCost()).setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue())
 				.currency(billingData.getCurrency())
 				.tag(billingData.getTag())
 				.build();
diff --git a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
index d688198..a2bd12b 100644
--- a/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
+++ b/services/billing-gcp/src/main/java/com/epam/dlab/billing/gcp/model/GcpBillingData.java
@@ -24,7 +24,6 @@
 import org.springframework.data.mongodb.core.mapping.Document;
 import org.springframework.data.mongodb.core.mapping.Field;
 
-import java.math.BigDecimal;
 import java.time.LocalDate;
 
 @Data
@@ -37,7 +36,7 @@
     private final LocalDate usageDateTo;
     private final String product;
     private final String usageType;
-    private final BigDecimal cost;
+    private final Double cost;
     private final String currency;
     @Field("dlabId")
     private final String tag;
diff --git a/services/billing-gcp/src/main/resources/application.yml b/services/billing-gcp/src/main/resources/application.yml
index c84ed68..b2fda5d 100644
--- a/services/billing-gcp/src/main/resources/application.yml
+++ b/services/billing-gcp/src/main/resources/application.yml
@@ -11,7 +11,7 @@
 dlab:
   sbn: <CONF_SERVICE_BASE_NAME>
   bigQueryDataset: <DATASET_NAME>
-  cron: 0 * * * * *
+  cron: 0 0 * * * *
 
 server:
   port: 8088
diff --git a/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java b/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
index 7a51f62..2ff670a 100644
--- a/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
+++ b/services/billing-gcp/src/test/java/com/epam/dlab/billing/gcp/service/BillingServiceImplTest.java
@@ -10,7 +10,6 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.math.BigDecimal;
 import java.time.LocalDate;
 import java.util.Collections;
 import java.util.List;
@@ -51,7 +50,7 @@
                 .usageDateTo(LocalDate.MAX)
                 .product("product")
                 .usageType("usageType")
-                .cost(new BigDecimal(1))
+                .cost(1d)
                 .currency("USD")
                 .tag("exploratoryId")
                 .build());
diff --git a/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java b/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
index 54a590e..dfec0dc 100644
--- a/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
+++ b/services/common/src/main/java/com/epam/dlab/billing/DlabResourceType.java
@@ -19,10 +19,6 @@
 
 package com.epam.dlab.billing;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
 public enum DlabResourceType {
 	SSN,
 	SSN_BUCKET,
@@ -51,88 +47,6 @@
 		return null;
 	}
 
-	public static String getResourceTypeName(String id) {
-		DlabResourceType resourceTypeId = DlabResourceType.of(id);
-		if (resourceTypeId != null) {
-			switch (resourceTypeId) {
-				case COMPUTATIONAL:
-					return "Cluster";
-				case EXPLORATORY:
-					return "Notebook";
-				case EDGE:
-					return "Edge Node";
-				case VOLUME:
-					return "Volume";
-				case EDGE_BUCKET:
-				case SSN_BUCKET:
-				case COLLABORATION_BUCKET:
-					return "Bucket";
-				case EDGE_CONTAINER:
-				case SSN_CONTAINER:
-				case COLLABORATION_CONTAINER:
-					return "Container";
-				case SSN_STORAGE_ACCOUNT:
-				case EDGE_STORAGE_ACCOUNT:
-				case COLLABORATION_STORAGE_ACCOUNT:
-					return "Storage Account";
-				case SSN:
-					return "SSN";
-				case DATA_LAKE_STORE:
-					return "Data Lake Store Account";
-			}
-		}
-		return id;
-	}
-
-	public static List<String> getResourceTypeIds(List<String> names) {
-		if (names == null || names.isEmpty()) {
-			return Collections.emptyList();
-		}
-
-		List<String> list = new ArrayList<>();
-		names.forEach(e -> {
-			switch (e) {
-				case "Cluster":
-					list.add(DlabResourceType.COMPUTATIONAL.toString());
-					break;
-				case "Notebook":
-					list.add(DlabResourceType.EXPLORATORY.toString());
-					break;
-				case "Edge Node":
-					list.add(DlabResourceType.EDGE.toString());
-					break;
-				case "Bucket":
-					list.add(DlabResourceType.EDGE_BUCKET.toString());
-					list.add(DlabResourceType.SSN_BUCKET.toString());
-					list.add(DlabResourceType.COLLABORATION_BUCKET.toString());
-					break;
-				case "Container":
-					list.add(DlabResourceType.EDGE_CONTAINER.toString());
-					list.add(DlabResourceType.SSN_CONTAINER.toString());
-					list.add(DlabResourceType.COLLABORATION_CONTAINER.toString());
-					break;
-				case "SSN":
-					list.add(DlabResourceType.SSN.toString());
-					break;
-				case "Storage Account":
-					list.add(DlabResourceType.SSN_STORAGE_ACCOUNT.toString());
-					list.add(DlabResourceType.EDGE_STORAGE_ACCOUNT.toString());
-					list.add(DlabResourceType.COLLABORATION_STORAGE_ACCOUNT.toString());
-					break;
-				case "Data Lake Store Account":
-					list.add(DlabResourceType.DATA_LAKE_STORE.toString());
-					break;
-				case "Volume":
-					list.add(DlabResourceType.VOLUME.toString());
-					break;
-				default:
-					list.add(e);
-			}
-		});
-
-		return list;
-	}
-
 	@Override
 	public String toString() {
 		return super.toString().toUpperCase();
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
index 65fb838..16d36be 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
@@ -30,6 +30,7 @@
     private String instanceId;
     @JsonProperty("exploratory_name")
     private String exploratoryName;
+    private String project;
     @JsonProperty("exploratory_id")
     private String exploratoryId;
     @JsonProperty("exploratory_template_name")
@@ -61,6 +62,19 @@
         return self;
     }
 
+    public String getProject() {
+        return project;
+    }
+
+    public void setProject(String project) {
+        this.project = project;
+    }
+
+    public T withProject(String project) {
+        setProject(project);
+        return self;
+    }
+
     public String getExploratoryId() {
         return exploratoryId;
     }
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
index 894d3e4..7ad1082 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/billing/BillingResourceType.java
@@ -22,8 +22,10 @@
 public enum BillingResourceType {
     EDGE,
     SSN,
+    ENDPOINT,
     BUCKET,
     VOLUME,
     EXPLORATORY,
-    COMPUTATIONAL
+    COMPUTATIONAL,
+    IMAGE
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
index 8ccf260..877cc5a 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
@@ -96,8 +96,9 @@
 	@Override
 	protected ComputationalStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
 		return super.getBaseStatusDTO(status)
-				.withExploratoryName(dto.getExploratoryName())
-				.withComputationalName(dto.getComputationalName());
+				.withExploratoryName(dto.getExploratoryName())
+				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject());
 	}
 
 	private String instanceId(JsonNode jsonNode) {
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
index c53c86e..8d6e794 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
@@ -56,6 +56,7 @@
 		return baseStatus
 				.withExploratoryName(dto.getExploratoryName())
 				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject())
 				.withUptime(null)
 				.withLastActivity(Date.from(Instant.now()));
 	}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
index 62746e8..047ebf9 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
@@ -50,14 +50,16 @@
 
 	@JsonProperty
 	private final String exploratoryName;
+	@JsonProperty private final String project;
 
 	@JsonCreator
 	public ExploratoryCallbackHandler(@JacksonInject RESTService selfService,
 									  @JsonProperty("action") DockerAction action,
 									  @JsonProperty("uuid") String uuid, @JsonProperty("user") String user,
-									  @JsonProperty("exploratoryName") String exploratoryName) {
+									  @JsonProperty("project") String project, @JsonProperty("exploratoryName") String exploratoryName) {
 		super(selfService, user, uuid, action);
 		this.exploratoryName = exploratoryName;
+		this.project = project;
 	}
 
 	@Override
@@ -99,6 +101,8 @@
 
 	@Override
 	protected ExploratoryStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
-		return super.getBaseStatusDTO(status).withExploratoryName(exploratoryName);
+		return super.getBaseStatusDTO(status)
+				.withExploratoryName(exploratoryName)
+				.withProject(project);
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
index a31fea3..8d46b60 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
@@ -111,7 +111,8 @@
 	protected LibInstallStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
 		return super.getBaseStatusDTO(status)
 				.withExploratoryName(dto.getExploratoryName())
-				.withUptime(Date.from(Instant.now()))
-				.withComputationalName(dto.getComputationalName());
+				.withComputationalName(dto.getComputationalName())
+				.withProject(dto.getProject())
+				.withUptime(Date.from(Instant.now()));
 	}
 }
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
index e4d9ecf..b15b342 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
@@ -70,7 +70,7 @@
 
 	private FileHandlerCallback getFileHandlerCallback(DockerAction action, String uuid, ExploratoryBaseDTO<?> dto) {
 		return new ExploratoryCallbackHandler(selfService, action, uuid, dto.getCloudSettings().getIamUser(),
-				dto.getExploratoryName());
+				dto.getProject(), dto.getExploratoryName());
 	}
 
 	private String nameContainer(String user, DockerAction action, String name) {
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
index fe2bf0a..81afe8a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
@@ -6,7 +6,7 @@
       "tunnel_port": "22",
       "full_edge_conf": {
         "notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
-        "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+        "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
         "edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
         "allocation_id": "eipalloc-2801084f",
         "key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Start up notebook server",
          "ip": "172.31.48.131",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "hostname": "ip-172-31-48-131.us-west-2.compute.internal",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Stop notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Terminate notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
index cda1c9e..b2a9931 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
@@ -6,7 +6,7 @@
          "tunnel_port": "22",
          "full_edge_conf": {
             "notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
-            "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+            "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
             "edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
             "allocation_id": "eipalloc-2801084f",
             "key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Start up notebook server",
          "ip": "172.31.48.131",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "hostname": "ip-172-31-48-131.us-west-2.compute.internal",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Stop notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
@@ -4,7 +4,7 @@
       "result": {
          "Action": "Terminate notebook server",
          "user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
@@ -3,7 +3,7 @@
    "response": {
       "result": {
          "Action": "Configure notebook server",
-         "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+         "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
          "notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
       },
       "log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
index bf92609..a065248 100644
--- a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
+++ b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
@@ -70,7 +70,7 @@
 
 		RESTServiceMock selfService = new RESTServiceMock();
 		ExploratoryCallbackHandler handler = new ExploratoryCallbackHandler(selfService, action,
-				getRequestId(exec), getEdgeUserName(exec), getExploratoryName(exec));
+				getRequestId(exec), getEdgeUserName(exec), "", getExploratoryName(exec));
 		handler.handle(exec.getResponseFileName(), Files.readAllBytes(Paths.get(exec.getResponseFileName())));
 
 		try {
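The extra "" argument above is the visible effect of a new project parameter on the ExploratoryCallbackHandler constructor; the mock test appears to pass an empty string because its canned responses are not project-scoped (an inference from this call site, not something the change states). A sketch of the resulting call shape, with the new argument's role inferred:

    ExploratoryCallbackHandler handler = new ExploratoryCallbackHandler(
            selfService, action,
            getRequestId(exec), getEdgeUserName(exec),
            "",                       // project: left empty in mock tests (inferred role)
            getExploratoryName(exec));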
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java
new file mode 100644
index 0000000..2fca3cd
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/ProjectAdmin.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface ProjectAdmin {
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java
new file mode 100644
index 0000000..b56dd20
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/annotation/User.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target(ElementType.PARAMETER)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface User {
+}
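Taken together, the two new annotations read as a method-level access guard: @ProjectAdmin marks a method whose caller must administer the affected project, and @User marks the parameter carrying the caller's identity for that check. The enforcing interceptor is not part of this change set (RUNTIME retention merely makes one possible), so the following usage is a hypothetical sketch; the service class and method are invented for illustration:

    import com.epam.dlab.auth.UserInfo;
    import com.epam.dlab.backendapi.annotation.ProjectAdmin;
    import com.epam.dlab.backendapi.annotation.User;

    public class ProjectServiceExample {

        // Hypothetical guarded method: an interceptor would resolve the @User
        // argument and verify project-admin rights before the body executes.
        @ProjectAdmin
        public void updateBudget(@User UserInfo userInfo, String project, int budget) {
            // runs only for admins of the given project (assumed semantics)
        }
    }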
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
index 71223db..d22e400 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BaseBillingDAO.java
@@ -19,89 +19,30 @@
 
 package com.epam.dlab.backendapi.dao;
 
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.BaseShape;
-import com.epam.dlab.backendapi.domain.DataEngineServiceShape;
-import com.epam.dlab.backendapi.domain.DataEngineShape;
-import com.epam.dlab.backendapi.domain.EndpointShape;
-import com.epam.dlab.backendapi.domain.ExploratoryShape;
-import com.epam.dlab.backendapi.domain.SsnShape;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.roles.RoleType;
-import com.epam.dlab.backendapi.roles.UserRoles;
-import com.epam.dlab.billing.BillingCalculationUtils;
-import com.epam.dlab.billing.DlabResourceType;
-import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.model.aws.ReportLine;
-import com.google.common.collect.Lists;
 import com.google.inject.Inject;
-import com.mongodb.client.AggregateIterable;
-import com.mongodb.client.FindIterable;
-import com.mongodb.client.model.Aggregates;
-import com.mongodb.client.model.Filters;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-import org.bson.Document;
 import org.bson.conversions.Bson;
 
 import java.math.BigDecimal;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
-import java.util.stream.StreamSupport;
 
-import static com.epam.dlab.backendapi.dao.ComputationalDAO.COMPUTATIONAL_ID;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_ID;
 import static com.epam.dlab.backendapi.dao.MongoCollections.BILLING;
-import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_RESOURCE_TYPE;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_USAGE_DATE;
 import static com.mongodb.client.model.Accumulators.sum;
 import static com.mongodb.client.model.Aggregates.group;
 import static com.mongodb.client.model.Aggregates.match;
 import static com.mongodb.client.model.Filters.eq;
-import static com.mongodb.client.model.Filters.gte;
-import static com.mongodb.client.model.Filters.in;
-import static com.mongodb.client.model.Filters.lte;
-import static com.mongodb.client.model.Filters.regex;
-import static com.mongodb.client.model.Projections.excludeId;
-import static com.mongodb.client.model.Projections.fields;
-import static com.mongodb.client.model.Projections.include;
 import static java.util.Collections.singletonList;
 
 @Slf4j
-public abstract class BaseBillingDAO extends BaseDAO implements BillingDAO {
-
-	public static final String SHAPE = "shape";
-	public static final String SERVICE_BASE_NAME = "service_base_name";
-	public static final String ITEMS = "lines";
-	public static final String COST_TOTAL = "cost_total";
-	public static final String FULL_REPORT = "full_report";
+public class BaseBillingDAO extends BaseDAO implements BillingDAO {
 
 	private static final String PROJECT = "project";
-	private static final String MASTER_NODE_SHAPE = "master_node_shape";
-	private static final String SLAVE_NODE_SHAPE = "slave_node_shape";
-	private static final String TOTAL_INSTANCE_NUMBER = "total_instance_number";
-
-	private static final String DATAENGINE_SHAPE = "dataengine_instance_shape";
-	private static final String DATAENGINE_INSTANCE_COUNT = "dataengine_instance_count";
-
-	private static final String DATAENGINE_DOCKER_IMAGE = "image";
 	private static final int ONE_HUNDRED = 100;
 	private static final String TOTAL_FIELD_NAME = "total";
 	private static final String COST_FIELD = "$cost";
-	public static final String SHARED_RESOURCE_NAME = "Shared resource";
-	protected static final String FIELD_PROJECT = "project";
-	private static final String EDGE_FORMAT = "%s-%s-%s-edge";
-	private static final String PROJECT_COLLECTION = "Projects";
-	private static final String TAGS = "tags";
 
 	@Inject
 	protected SettingsDAO settings;
@@ -111,160 +52,6 @@
 	private ProjectDAO projectDAO;
 
 	@Override
-	public Document getReport(UserInfo userInfo, BillingFilter filter) {
-		boolean isFullReport = UserRoles.checkAccess(userInfo, RoleType.PAGE, "/api/infrastructure_provision/billing",
-				userInfo.getRoles());
-		setUserFilter(userInfo, filter, isFullReport);
-		List<Bson> matchCriteria = matchCriteria(filter);
-		List<Bson> pipeline = new ArrayList<>();
-		if (!matchCriteria.isEmpty()) {
-			pipeline.add(Aggregates.match(Filters.and(matchCriteria)));
-		}
-		pipeline.add(groupCriteria());
-		pipeline.add(sortCriteria());
-		final Map<String, BaseShape> shapes = getShapes(filter.getShapes());
-		return prepareReport(filter.getStatuses(), !filter.getShapes().isEmpty(),
-				getCollection(BILLING).aggregate(pipeline), shapes, isFullReport);
-	}
-
-	private Document prepareReport(List<UserInstanceStatus> statuses, boolean filterByShape,
-								   AggregateIterable<Document> agg,
-								   Map<String, BaseShape> shapes, boolean fullReport) {
-
-		List<Document> reportItems = new ArrayList<>();
-
-		String usageDateStart = null;
-		String usageDateEnd = null;
-		double costTotal = 0D;
-
-		for (Document d : agg) {
-			Document id = (Document) d.get(MongoKeyWords.MONGO_ID);
-			String resourceId = id.getString(dlabIdFieldName());
-			BaseShape shape = shapes.get(resourceId);
-			final UserInstanceStatus status = Optional.ofNullable(shape).map(BaseShape::getStatus).orElse(null);
-			if ((filterByShape && shape == null) ||
-					(!statuses.isEmpty() && statuses.stream().noneMatch(s -> s.equals(status)))) {
-				continue;
-			}
-
-
-			String dateStart = d.getString(MongoKeyWords.USAGE_FROM);
-			if (StringUtils.compare(usageDateStart, dateStart, false) > 0) {
-				usageDateStart = dateStart;
-			}
-			String dateEnd = d.getString(MongoKeyWords.USAGE_TO);
-			if (StringUtils.compare(usageDateEnd, dateEnd) < 0) {
-				usageDateEnd = dateEnd;
-			}
-
-
-			costTotal += d.getDouble(MongoKeyWords.COST);
-
-			final String dlabResourceType = id.getString("dlab_resource_type");
-			final String statusString = Optional
-					.ofNullable(status)
-					.map(UserInstanceStatus::toString)
-					.orElse(StringUtils.EMPTY);
-
-			Document item = new Document()
-					.append(MongoKeyWords.DLAB_USER, getOrDefault(id.getString(USER)))
-					.append(dlabIdFieldName(), resourceId)
-					.append(shapeFieldName(), Optional.ofNullable(shape).map(BaseShape::format)
-							.orElse(StringUtils.EMPTY))
-					.append("dlab_resource_type", DlabResourceType
-							.getResourceTypeName(dlabResourceType)) //todo check on azure!!!
-					.append(STATUS, statusString)
-					.append(FIELD_RESOURCE_TYPE, resourceType(id))
-					.append(productFieldName(), id.getString(productFieldName()))
-					.append(PROJECT, getOrDefault(id.getString(PROJECT)))
-					.append(MongoKeyWords.COST, d.getDouble(MongoKeyWords.COST))
-					.append(costFieldName(), BillingCalculationUtils.formatDouble(d.getDouble(MongoKeyWords
-							.COST)))
-					.append(currencyCodeFieldName(), id.getString(currencyCodeFieldName()))
-					.append(usageDateFromFieldName(), dateStart)
-					.append(usageDateToFieldName(), dateEnd);
-
-			reportItems.add(item);
-		}
-
-		return new Document()
-				.append(SERVICE_BASE_NAME, settings.getServiceBaseName())
-				.append(usageDateFromFieldName(), usageDateStart)
-				.append(usageDateToFieldName(), usageDateEnd)
-				.append(ITEMS, reportItems)
-				.append(COST_TOTAL, BillingCalculationUtils.formatDouble(BillingCalculationUtils.round
-						(costTotal, 2)))
-				.append(currencyCodeFieldName(), (reportItems.isEmpty() ? null :
-						reportItems.get(0).getString(currencyCodeFieldName())))
-				.append(FULL_REPORT, fullReport);
-
-	}
-
-	protected String resourceType(Document id) {
-		return id.getString(FIELD_RESOURCE_TYPE);
-	}
-
-	protected String currencyCodeFieldName() {
-		return "currency_code";
-	}
-
-	protected String usageDateToFieldName() {
-		return MongoKeyWords.USAGE_TO;
-	}
-
-	protected String costFieldName() {
-		return MongoKeyWords.COST;
-	}
-
-	protected String productFieldName() {
-		return ReportLine.FIELD_PRODUCT;
-	}
-
-	protected String usageDateFromFieldName() {
-		return MongoKeyWords.USAGE_FROM;
-	}
-
-	protected String dlabIdFieldName() {
-		return ReportLine.FIELD_DLAB_ID;
-	}
-
-	protected String shapeFieldName() {
-		return SHAPE;
-	}
-
-	protected abstract Bson sortCriteria();
-
-	protected abstract Bson groupCriteria();
-
-	private Map<String, BaseShape> getShapes(List<String> shapeNames) {
-		FindIterable<Document> userInstances = getUserInstances();
-		final Map<String, BaseShape> shapes = new HashMap<>();
-
-		for (Document d : userInstances) {
-			getExploratoryShape(shapeNames, d)
-					.ifPresent(shape -> shapes.put(d.getString(EXPLORATORY_ID), shape));
-			@SuppressWarnings("unchecked")
-			List<Document> comp = (List<Document>) d.get(COMPUTATIONAL_RESOURCES);
-			comp.forEach(c -> (isDataEngine(c.getString(DATAENGINE_DOCKER_IMAGE)) ? getDataEngineShape(shapeNames, c) :
-					getDataEngineServiceShape(shapeNames, c))
-					.ifPresent(shape -> shapes.put(c.getString(COMPUTATIONAL_ID), shape)));
-		}
-
-		StreamSupport.stream(getCollection(PROJECT_COLLECTION).find().spliterator(), false)
-				.forEach(d -> ((List<Document>) d.get("endpoints"))
-						.forEach(endpoint -> getEndpointShape(shapeNames, endpoint)
-								.ifPresent(shape -> shapes.put(String.format(EDGE_FORMAT, getServiceBaseName(),
-										d.getString("name").toLowerCase(),
-										endpoint.getString("name")), shape))));
-
-		getSsnShape(shapeNames)
-				.ifPresent(shape -> shapes.put(getServiceBaseName() + "-ssn", shape));
-
-		log.trace("Loaded shapes is {}", shapes);
-		return shapes;
-	}
-
-	@Override
 	public Double getTotalCost() {
 		return aggregateBillingData(singletonList(group(null, sum(TOTAL_FIELD_NAME, COST_FIELD))));
 	}
@@ -306,7 +93,6 @@
 				.isPresent();
 	}
 
-
 	@Override
 	public boolean isProjectQuoteReached(String project) {
 		final Double projectCost = getProjectCost(project);
@@ -320,10 +106,6 @@
 		return toPercentage(() -> projectDAO.getAllowedBudget(project), getProjectCost(project));
 	}
 
-	private String getOrDefault(String value) {
-		return StringUtils.isNotBlank(value) ? value : SHARED_RESOURCE_NAME;
-	}
-
 	private Integer toPercentage(Supplier<Optional<Integer>> allowedBudget, Double totalCost) {
 		return allowedBudget.get()
 				.map(userBudget -> (totalCost * ONE_HUNDRED) / userBudget)
@@ -331,152 +113,9 @@
 				.orElse(BigDecimal.ZERO.intValue());
 	}
 
-	private List<Bson> matchCriteria(BillingFilter filter) {
-
-		List<Bson> searchCriteria = new ArrayList<>();
-
-		if (filter.getUsers() != null && !filter.getUsers().isEmpty()) {
-			searchCriteria.add(Filters.in(MongoKeyWords.DLAB_USER, filter.getUsers()));
-		}
-
-		if (filter.getResourceTypes() != null && !filter.getResourceTypes().isEmpty()) {
-			searchCriteria.add(Filters.in("dlab_resource_type",
-					DlabResourceType.getResourceTypeIds(filter.getResourceTypes())));
-		}
-
-		if (filter.getDlabId() != null && !filter.getDlabId().isEmpty()) {
-			searchCriteria.add(regex(dlabIdFieldName(), filter.getDlabId(), "i"));
-		}
-
-		if (filter.getDateStart() != null && !filter.getDateStart().isEmpty()) {
-			searchCriteria.add(gte(FIELD_USAGE_DATE, filter.getDateStart()));
-		}
-		if (filter.getDateEnd() != null && !filter.getDateEnd().isEmpty()) {
-			searchCriteria.add(lte(FIELD_USAGE_DATE, filter.getDateEnd()));
-		}
-		if (filter.getProjects() != null && !filter.getProjects().isEmpty()) {
-			searchCriteria.add(in(PROJECT, filter.getProjects()));
-		}
-
-		searchCriteria.addAll(cloudMatchCriteria(filter));
-		return searchCriteria;
-	}
-
-	protected abstract List<Bson> cloudMatchCriteria(BillingFilter filter);
-
 	private Double aggregateBillingData(List<Bson> pipeline) {
 		return Optional.ofNullable(aggregate(BILLING, pipeline).first())
 				.map(d -> d.getDouble(TOTAL_FIELD_NAME))
 				.orElse(BigDecimal.ZERO.doubleValue());
 	}
-
-	private FindIterable<Document> getUserInstances() {
-		return getCollection(USER_INSTANCES)
-				.find()
-				.projection(
-						fields(excludeId(),
-								include(SHAPE, EXPLORATORY_ID, STATUS, TAGS,
-										COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_ID,
-										COMPUTATIONAL_RESOURCES + "." + MASTER_NODE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + SLAVE_NODE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + TOTAL_INSTANCE_NUMBER,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_SHAPE,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_INSTANCE_COUNT,
-										COMPUTATIONAL_RESOURCES + "." + DATAENGINE_DOCKER_IMAGE,
-										COMPUTATIONAL_RESOURCES + "." + STATUS,
-										COMPUTATIONAL_RESOURCES + "." + TAGS
-								)));
-	}
-
-	private Optional<ExploratoryShape> getExploratoryShape(List<String> shapeNames, Document d) {
-		final String shape = d.getString(SHAPE);
-		if (isShapeAcceptable(shapeNames, shape)) {
-			return Optional.of(ExploratoryShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.of(d.getString(STATUS)))
-					.tags((Map<String, String>) d.get(TAGS))
-					.build());
-		}
-		return Optional.empty();
-	}
-
-	private Optional<DataEngineServiceShape> getDataEngineServiceShape(List<String> shapeNames, Document c) {
-		final String desMasterShape = c.getString(MASTER_NODE_SHAPE);
-		final String desSlaveShape = c.getString(SLAVE_NODE_SHAPE);
-		if (isShapeAcceptable(shapeNames, desMasterShape, desSlaveShape)) {
-			return Optional.of(DataEngineServiceShape.builder()
-					.shape(desMasterShape)
-					.status(UserInstanceStatus.of(c.getString(STATUS)))
-					.slaveCount(c.getString(TOTAL_INSTANCE_NUMBER))
-					.slaveShape(desSlaveShape)
-					.tags((Map<String, String>) c.get(TAGS))
-					.build());
-		}
-		return Optional.empty();
-	}
-
-	private Optional<DataEngineShape> getDataEngineShape(List<String> shapeNames, Document c) {
-		final String shape = c.getString(DATAENGINE_SHAPE);
-		if ((isShapeAcceptable(shapeNames, shape)) && StringUtils.isNotEmpty(c.getString(COMPUTATIONAL_ID))) {
-
-			return Optional.of(DataEngineShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.of(c.getString(STATUS)))
-					.slaveCount(c.getString(DATAENGINE_INSTANCE_COUNT))
-					.tags((Map<String, String>) c.get(TAGS))
-					.build());
-		}
-		return Optional.empty();
-	}
-
-	private Optional<SsnShape> getSsnShape(List<String> shapeNames) {
-		final String shape = getSsnShape();
-		if (isShapeAcceptable(shapeNames, shape)) {
-			return Optional.of(SsnShape.builder()
-					.shape(shape)
-					.status(UserInstanceStatus.RUNNING)
-					.build());
-		}
-		return Optional.empty();
-	}
-
-	private Optional<EndpointShape> getEndpointShape(List<String> shapeNames, Document endpoint) {
-		if (isShapeAcceptable(shapeNames, getSsnShape())) {
-			return Optional.of(EndpointShape.builder()
-					.shape(StringUtils.EMPTY)
-					.status(UserInstanceStatus.of(endpoint.getString("status")))
-					.build());
-		}
-		return Optional.empty();
-	}
-
-	private boolean isDataEngine(String dockerImage) {
-		return DataEngineType.fromDockerImageName(dockerImage) == DataEngineType.SPARK_STANDALONE;
-	}
-
-	private boolean isShapeAcceptable(List<String> shapeNames, String... shapes) {
-		return shapeNames == null || shapeNames.isEmpty() || Arrays.stream(shapes).anyMatch(shapeNames::contains);
-	}
-
-	protected String getServiceBaseName() {
-		return settings.getServiceBaseName();
-	}
-
-	protected String getSsnShape() {
-		return settings.getSsnInstanceSize();
-	}
-
-	protected void usersToLowerCase(List<String> users) {
-		if (users != null) {
-			users.replaceAll(u -> u != null ? u.toLowerCase() : null);
-		}
-	}
-
-	protected void setUserFilter(UserInfo userInfo, BillingFilter filter, boolean isFullReport) {
-		if (isFullReport) {
-			usersToLowerCase(filter.getUsers());
-		} else {
-			filter.setUsers(Lists.newArrayList(userInfo.getName().toLowerCase()));
-		}
-	}
 }
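What survives the trim in BaseBillingDAO is the cost aggregation and the quota arithmetic: toPercentage() computes floor(totalCost * 100 / allowedBudget) and falls back to 0 when no budget is configured. A standalone restatement of that retained logic (class and method names here are illustrative only):

    import java.math.BigDecimal;
    import java.util.Optional;

    final class QuotaMathSketch {
        private static final int ONE_HUNDRED = 100;

        // Mirrors toPercentage() above: totalCost=25.0 against a budget of 200
        // yields 12 (percent); an absent budget yields 0.
        static int usagePercentage(Optional<Integer> allowedBudget, double totalCost) {
            return allowedBudget
                    .map(budget -> (totalCost * ONE_HUNDRED) / budget)
                    .map(Double::intValue)
                    .orElse(BigDecimal.ZERO.intValue());
        }
    }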
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
index 1ea06b8..d50c62f 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/BillingDAO.java
@@ -18,10 +18,6 @@
  */
 package com.epam.dlab.backendapi.dao;
 
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import org.bson.Document;
-
 public interface BillingDAO {
 	Double getTotalCost();
 
@@ -40,6 +36,4 @@
 	boolean isUserQuoteReached(String user);
 
 	boolean isProjectQuoteReached(String project);
-
-	Document getReport(UserInfo userInfo, BillingFilter filter);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
index 311158a..683f8fc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
@@ -21,7 +21,11 @@
 
 
 import com.epam.dlab.backendapi.util.DateRemoverUtil;
-import com.epam.dlab.dto.*;
+import com.epam.dlab.dto.ResourceURL;
+import com.epam.dlab.dto.SchedulerJobDTO;
+import com.epam.dlab.dto.StatusEnvBaseDTO;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.computational.ComputationalStatusDTO;
@@ -36,15 +40,30 @@
 
 import java.time.LocalDateTime;
 import java.time.ZoneId;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.UPTIME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.epam.dlab.backendapi.dao.SchedulerJobDAO.SCHEDULER_DATA;
 import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.not;
 import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.push;
 import static com.mongodb.client.model.Updates.set;
 import static java.util.stream.Collectors.toList;
@@ -70,8 +89,8 @@
 		return COMPUTATIONAL_RESOURCES + FIELD_SET_DELIMETER + fieldName;
 	}
 
-	private static Bson computationalCondition(String user, String exploratoryName, String compName) {
-		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName),
+	private static Bson computationalCondition(String user, String project, String exploratoryName, String compName) {
+		return and(eq(USER, user), eq(PROJECT, project), eq(EXPLORATORY_NAME, exploratoryName),
 				eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, compName));
 	}
 
@@ -80,12 +99,14 @@
 	 *
 	 * @param user             user name.
 	 * @param exploratoryName  name of exploratory.
+	 * @param project          name of project.
 	 * @param computationalDTO object of computational resource.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addComputational(String user, String exploratoryName, UserComputationalResource computationalDTO) {
+	public boolean addComputational(String user, String exploratoryName, String project,
+									UserComputationalResource computationalDTO) {
 		final UpdateResult updateResult = updateOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						not(elemMatch(COMPUTATIONAL_RESOURCES,
 								eq(COMPUTATIONAL_NAME, computationalDTO.getComputationalName())))),
 				push(COMPUTATIONAL_RESOURCES, convertToBson(computationalDTO)));
@@ -96,14 +117,15 @@
 	 * Finds and returns the fields of the computational resource.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @throws DlabException if exception occurs
 	 */
-	public UserComputationalResource fetchComputationalFields(String user, String exploratoryName,
+	public UserComputationalResource fetchComputationalFields(String user, String project, String exploratoryName,
 															  String computationalName) {
 		Optional<UserInstanceDTO> opt = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						Filters.elemMatch(COMPUTATIONAL_RESOURCES, eq(COMPUTATIONAL_NAME, computationalName))),
 				fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId()),
 				UserInstanceDTO.class);
@@ -114,10 +136,10 @@
 						"exploratory name " + exploratoryName + " not found."));
 	}
 
-	public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String exploratoryName,
+	public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String project, String exploratoryName,
 																				UserInstanceStatus status) {
 		final UserInstanceDTO userInstanceDTO = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(COMPUTATIONAL_RESOURCES, eq(STATUS, status.toString()))),
 				fields(include(COMPUTATIONAL_RESOURCES), excludeId()),
 				UserInstanceDTO.class)
@@ -139,7 +161,7 @@
 		try {
 			Document values = new Document(computationalFieldFilter(STATUS), dto.getStatus());
 			return updateOne(USER_INSTANCES,
-					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
 											not(eq(STATUS, TERMINATED.toString()))))),
@@ -162,7 +184,7 @@
 		UpdateResult result;
 		do {
 			result = updateOne(USER_INSTANCES,
-					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+					and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(not(eq(STATUS, TERMINATED.toString())),
 											not(eq(STATUS, dto.getStatus()))))),
@@ -174,80 +196,51 @@
 		return count;
 	}
 
-	public void updateComputationalStatusesForExploratory(String user, String exploratoryName,
+	public void updateComputationalStatusesForExploratory(String user, String project, String exploratoryName,
 														  UserInstanceStatus dataengineStatus,
 														  UserInstanceStatus dataengineServiceStatus,
 														  UserInstanceStatus... excludedStatuses) {
-		updateComputationalResource(user, exploratoryName, dataengineStatus, DataEngineType.SPARK_STANDALONE,
-				excludedStatuses);
-		updateComputationalResource(user, exploratoryName, dataengineServiceStatus, DataEngineType.CLOUD_SERVICE,
-				excludedStatuses);
-
-	}
-
-	/**
-	 * Updates status for all corresponding computational resources in Mongo database.
-	 *
-	 * @param newStatus                new status for computational resources.
-	 * @param user                     user name.
-	 * @param exploratoryStatuses      exploratory's status list.
-	 * @param computationalTypes       type list of computational resource (may contain 'dataengine' and/or
-	 *                                 'dataengine-service').
-	 * @param oldComputationalStatuses old statuses of computational resources.
-	 */
-
-	public void updateStatusForComputationalResources(UserInstanceStatus newStatus, String user,
-													  List<UserInstanceStatus> exploratoryStatuses,
-													  List<DataEngineType> computationalTypes,
-													  UserInstanceStatus... oldComputationalStatuses) {
-
-		List<String> exploratoryNames = stream(find(USER_INSTANCES,
-				and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
-				fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
-				.collect(toList());
-
-		exploratoryNames.forEach(explName ->
-				getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, oldComputationalStatuses)
-						.forEach(compName -> updateComputationalField(user, explName, compName,
-								STATUS, newStatus.toString()))
-		);
+		updateComputationalResource(user, project, exploratoryName, dataengineStatus,
+				DataEngineType.SPARK_STANDALONE, excludedStatuses);
+		updateComputationalResource(user, project, exploratoryName, dataengineServiceStatus,
+				DataEngineType.CLOUD_SERVICE, excludedStatuses);
 	}
 
 	/**
 	 * Updates the status for single computational resource in Mongo database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   exploratory's name.
 	 * @param computationalName name of computational resource.
 	 * @param newStatus         new status of computational resource.
 	 */
 
-	public void updateStatusForComputationalResource(String user, String exploratoryName,
-													 String computationalName,
-													 UserInstanceStatus newStatus) {
-		updateComputationalField(user, exploratoryName, computationalName, STATUS, newStatus.toString());
+	public void updateStatusForComputationalResource(String user, String project, String exploratoryName,
+													 String computationalName, UserInstanceStatus newStatus) {
+		updateComputationalField(user, project, exploratoryName, computationalName, STATUS, newStatus.toString());
 	}
 
 
-	private void updateComputationalResource(String user, String exploratoryName,
+	private void updateComputationalResource(String user, String project, String exploratoryName,
 											 UserInstanceStatus dataengineServiceStatus, DataEngineType cloudService,
 											 UserInstanceStatus... excludedStatuses) {
 		UpdateResult result;
 		do {
 			result = updateMany(USER_INSTANCES,
-					computationalFilter(user, exploratoryName, dataengineServiceStatus.toString(),
-							DataEngineType.getDockerImageName(cloudService), excludedStatuses),
+					computationalFilter(user, project, exploratoryName,
+							dataengineServiceStatus.toString(), DataEngineType.getDockerImageName(cloudService), excludedStatuses),
 					new Document(SET,
 							new Document(computationalFieldFilter(STATUS), dataengineServiceStatus.toString())));
 		} while (result.getModifiedCount() > 0);
 	}
 
-	private Bson computationalFilter(String user, String exploratoryName, String computationalStatus, String
-			computationalImage, UserInstanceStatus[] excludedStatuses) {
+	private Bson computationalFilter(String user, String project, String exploratoryName, String computationalStatus,
+									 String computationalImage, UserInstanceStatus[] excludedStatuses) {
 		final String[] statuses = Arrays.stream(excludedStatuses)
 				.map(UserInstanceStatus::toString)
 				.toArray(String[]::new);
-		return and(exploratoryCondition(user, exploratoryName),
+		return and(exploratoryCondition(user, exploratoryName, project),
 				elemMatch(COMPUTATIONAL_RESOURCES, and(eq(IMAGE, computationalImage),
 						not(in(STATUS, statuses)),
 						not(eq(STATUS, computationalStatus)))));
@@ -286,7 +279,7 @@
 				values.append(computationalFieldFilter(CONFIG),
 						dto.getConfig().stream().map(this::convertToBson).collect(toList()));
 			}
-			return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+			return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 					elemMatch(COMPUTATIONAL_RESOURCES,
 							and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
 									not(eq(STATUS, TERMINATED.toString()))))),
@@ -309,49 +302,19 @@
 		return map;
 	}
 
-
-	/**
-	 * Updates the requirement for reuploading key for all corresponding computational resources in Mongo database.
-	 *
-	 * @param user                  user name.
-	 * @param exploratoryStatuses   exploratory's status list.
-	 * @param computationalTypes    type list of computational resource (may contain 'dataengine' and/or
-	 *                              'dataengine-service').
-	 * @param reuploadKeyRequired   true/false.
-	 * @param computationalStatuses statuses of computational resource.
-	 */
-
-	public void updateReuploadKeyFlagForComputationalResources(String user,
-															   List<UserInstanceStatus> exploratoryStatuses,
-															   List<DataEngineType> computationalTypes,
-															   boolean reuploadKeyRequired,
-															   UserInstanceStatus... computationalStatuses) {
-
-		List<String> exploratoryNames = stream(find(USER_INSTANCES,
-				and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
-				fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
-				.collect(toList());
-
-		exploratoryNames.forEach(explName ->
-				getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, computationalStatuses)
-						.forEach(compName -> updateComputationalField(user, explName, compName,
-								REUPLOAD_KEY_REQUIRED, reuploadKeyRequired))
-		);
-	}
-
 	/**
 	 * Updates the requirement for reuploading key for single computational resource in Mongo database.
 	 *
 	 * @param user                user name.
+	 * @param project             project name.
 	 * @param exploratoryName     exploratory's name.
 	 * @param computationalName   name of computational resource.
 	 * @param reuploadKeyRequired true/false.
 	 */
 
-	public void updateReuploadKeyFlagForComputationalResource(String user, String exploratoryName,
-															  String computationalName, boolean
-																	  reuploadKeyRequired) {
-		updateComputationalField(user, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
+	public void updateReuploadKeyFlagForComputationalResource(String user, String project, String exploratoryName,
+															  String computationalName, boolean reuploadKeyRequired) {
+		updateComputationalField(user, project, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
 	}
 
 	/**
@@ -359,6 +322,7 @@
 	 * have predefined type.
 	 *
 	 * @param user                  user name.
+	 * @param project               project name.
 	 * @param computationalTypes    type list of computational resource which may contain 'dataengine' and/or
 	 *                              'dataengine-service'.
 	 * @param exploratoryName       name of exploratory.
@@ -367,10 +331,11 @@
 	 */
 
 	@SuppressWarnings("unchecked")
-	public List<String> getComputationalResourcesWhereStatusIn(String user, List<DataEngineType> computationalTypes,
+	public List<String> getComputationalResourcesWhereStatusIn(String user, String project,
+															   List<DataEngineType> computationalTypes,
 															   String exploratoryName,
 															   UserInstanceStatus... computationalStatuses) {
-		return stream((List<Document>) find(USER_INSTANCES, and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName)),
+		return stream((List<Document>) find(USER_INSTANCES, exploratoryCondition(user, exploratoryName, project),
 				fields(include(COMPUTATIONAL_RESOURCES))).first().get(COMPUTATIONAL_RESOURCES))
 				.filter(doc ->
 						statusList(computationalStatuses).contains(doc.getString(STATUS)) &&
@@ -379,9 +344,9 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public List<ClusterConfig> getClusterConfig(String user, String exploratoryName, String computationalName) {
+	public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName, String computationalName) {
 		return findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						Filters.elemMatch(COMPUTATIONAL_RESOURCES, and(eq(COMPUTATIONAL_NAME, computationalName),
 								notNull(CONFIG)))),
 				fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId())
@@ -396,41 +361,42 @@
 	 * Updates computational resource's field.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @param fieldName         computational field's name for updating.
 	 * @param fieldValue        computational field's value for updating.
 	 */
 
-	private <T> UpdateResult updateComputationalField(String user, String exploratoryName, String computationalName,
+	private <T> UpdateResult updateComputationalField(String user, String project, String exploratoryName, String computationalName,
 													  String fieldName, T fieldValue) {
 		return updateOne(USER_INSTANCES,
-				computationalCondition(user, exploratoryName, computationalName),
+				computationalCondition(user, project, exploratoryName, computationalName),
 				set(computationalFieldFilter(fieldName), fieldValue));
 	}
 
-	public void updateSchedulerSyncFlag(String user, String exploratoryName, boolean syncFlag) {
+	public void updateSchedulerSyncFlag(String user, String project, String exploratoryName, boolean syncFlag) {
 		final String syncStartField = SCHEDULER_DATA + ".sync_start_required";
 		UpdateResult result;
 		do {
 
-			result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+			result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
 					elemMatch(COMPUTATIONAL_RESOURCES, and(ne(SCHEDULER_DATA, null), ne(syncStartField, syncFlag)))),
 					set(computationalFieldFilter(syncStartField), syncFlag));
 
 		} while (result.getModifiedCount() != 0);
 	}
 
-	public UpdateResult updateSchedulerDataForComputationalResource(String user, String exploratoryName,
+	public UpdateResult updateSchedulerDataForComputationalResource(String user, String project, String exploratoryName,
 																	String computationalName, SchedulerJobDTO dto) {
-		return updateComputationalField(user, exploratoryName, computationalName, SCHEDULER_DATA,
-				Objects.isNull(dto) ? null : convertToBson(dto));
+		return updateComputationalField(user, project, exploratoryName, computationalName,
+				SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto));
 	}
 
-	public void updateLastActivity(String user, String exploratoryName,
+	public void updateLastActivity(String user, String project, String exploratoryName,
 								   String computationalName, LocalDateTime lastActivity) {
 		updateOne(USER_INSTANCES,
-				computationalCondition(user, exploratoryName, computationalName),
+				computationalCondition(user, project, exploratoryName, computationalName),
 				set(computationalFieldFilter(COMPUTATIONAL_LAST_ACTIVITY),
 						Date.from(lastActivity.atZone(ZoneId.systemDefault()).toInstant())));
 	}
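The pattern running through this file is mechanical: every Mongo lookup key gains an eq(PROJECT, project) clause, so a notebook or cluster name is now unique per (user, project) rather than per user alone. A minimal sketch of the scoped condition, with literal field keys standing in for the DAO constants ("project" matches the PROJECT constant seen in BaseBillingDAO; the other two literals are assumptions):

    import org.bson.conversions.Bson;

    import static com.mongodb.client.model.Filters.and;
    import static com.mongodb.client.model.Filters.eq;

    final class ScopedConditionSketch {
        // Old key: (user, exploratory_name). Adding the project means same-named
        // notebooks in different projects resolve to distinct documents.
        static Bson exploratoryCondition(String user, String exploratoryName, String project) {
            return and(eq("user", user),
                    eq("exploratory_name", exploratoryName),
                    eq("project", project));
        }
    }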
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
index 0e85908..f554873 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
@@ -23,8 +23,6 @@
 import com.epam.dlab.backendapi.SelfServiceApplication;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
-import com.epam.dlab.backendapi.resources.dto.HealthStatusEnum;
-import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.base.DataEngineType;
@@ -40,18 +38,39 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_EDGE;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static com.mongodb.client.model.Filters.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Filters.or;
 import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static java.util.Objects.nonNull;
 
 /**
@@ -145,31 +164,20 @@
 	}
 
 	/**
-	 * @param user       the name of user.
-	 * @param fullReport return full report if <b>true</b> otherwise common status only.
-	 * @throws DlabException in case of any exception
-	 */
-	public HealthStatusPageDTO getHealthStatusPageDTO(String user, boolean fullReport) {
-		return new HealthStatusPageDTO()
-				.withStatus(HealthStatusEnum.OK)
-				.withListResources(Collections.emptyList());
-	}
-
-
-	/**
 	 * Updates the status of exploratory and computational for user.
 	 *
-	 * @param user the name of user.
-	 * @param list the status of node.
+	 * @param user    the name of user.
+	 * @param project name of project.
+	 * @param list    the list of node statuses.
 	 */
-	public void updateEnvStatus(String user, EnvResourceList list) {
+	public void updateEnvStatus(String user, String project, EnvResourceList list) {
 		if (list != null && notEmpty(list.getHostList())) {
 			updateEdgeStatus(user, list.getHostList());
 			if (!list.getHostList().isEmpty()) {
 				stream(find(USER_INSTANCES, eq(USER, user),
 						fields(INCLUDE_EXP_UPDATE_FIELDS, excludeId())))
 						.filter(this::instanceIdPresent)
-						.forEach(exp -> updateUserResourceStatuses(user, list, exp));
+						.forEach(exp -> updateUserResourceStatuses(user, project, list, exp));
 			}
 		}
 	}
@@ -189,32 +197,26 @@
 				.collect(Collectors.toSet());
 	}
 
-	public Set<String> fetchAllUsers() {
-		return stream(find(USER_EDGE)).map(d -> d.getString(ID))
-				.collect(Collectors.toSet());
-	}
-
 	@SuppressWarnings("unchecked")
-	private void updateUserResourceStatuses(String user, EnvResourceList list, Document exp) {
+	private void updateUserResourceStatuses(String user, String project, EnvResourceList list, Document exp) {
 		final String exploratoryName = exp.getString(EXPLORATORY_NAME);
 		getEnvResourceAndRemove(list.getHostList(), exp.getString(INSTANCE_ID))
-				.ifPresent(resource -> updateExploratoryStatus(user, exploratoryName, exp.getString(STATUS),
-						resource.getStatus()));
+				.ifPresent(resource -> updateExploratoryStatus(user, project, exploratoryName,
+						exp.getString(STATUS), resource.getStatus()));
 
 		(getComputationalResources(exp))
 				.stream()
 				.filter(this::instanceIdPresent)
-				.forEach(comp -> updateComputational(user, list, exploratoryName, comp));
+				.forEach(comp -> updateComputational(user, project, list, exploratoryName, comp));
 	}
 
-	private void updateComputational(String user, EnvResourceList list, String exploratoryName, Document comp) {
+	private void updateComputational(String user, String project, EnvResourceList list, String exploratoryName, Document comp) {
 		final List<EnvResource> listToCheck = DataEngineType.CLOUD_SERVICE ==
 				DataEngineType.fromDockerImageName(comp.getString(IMAGE)) ?
 				list.getClusterList() : list.getHostList();
 		getEnvResourceAndRemove(listToCheck, comp.getString(INSTANCE_ID))
-				.ifPresent(resource -> updateComputationalStatus(user, exploratoryName,
-						comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource
-								.getStatus()));
+				.ifPresent(resource -> updateComputationalStatus(user, project, exploratoryName,
+						comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource.getStatus()));
 	}
 
 	private boolean instanceIdPresent(Document d) {
@@ -339,11 +341,12 @@
 	 * Update the status of exploratory if it needed.
 	 *
 	 * @param user            the user name
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory
 	 * @param oldStatus       old status
 	 * @param newStatus       new status
 	 */
-	private void updateExploratoryStatus(String user, String exploratoryName,
+	private void updateExploratoryStatus(String user, String project, String exploratoryName,
 										 String oldStatus, String newStatus) {
 		LOGGER.trace("Update exploratory status for user {} with exploratory {} from {} to {}", user, exploratoryName,
 				oldStatus, newStatus);
@@ -356,7 +359,7 @@
 			LOGGER.debug("Exploratory status for user {} with exploratory {} will be updated from {} to {}", user,
 					exploratoryName, oldStatus, status);
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					Updates.set(STATUS, status.toString()));
 		}
 	}
@@ -399,12 +402,13 @@
 	 * Update the status of exploratory if it needed.
 	 *
 	 * @param user              the user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational.
 	 * @param oldStatus         old status.
 	 * @param newStatus         new status.
 	 */
-	private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+	private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
 										   String oldStatus, String newStatus) {
 		LOGGER.trace("Update computational status for user {} with exploratory {} and computational {} from {} to {}",
 				user, exploratoryName, computationalName, oldStatus, newStatus);
@@ -420,12 +424,12 @@
 							"from {} to {}",
 					user, exploratoryName, computationalName, oldStatus, status);
 			if (status == UserInstanceStatus.TERMINATED &&
-					terminateComputationalSpot(user, exploratoryName, computationalName)) {
+					terminateComputationalSpot(user, project, exploratoryName, computationalName)) {
 				return;
 			}
 			Document values = new Document(COMPUTATIONAL_STATUS_FILTER, status.toString());
 			updateOne(USER_INSTANCES,
-					and(exploratoryCondition(user, exploratoryName),
+					and(exploratoryCondition(user, exploratoryName, project),
 							elemMatch(COMPUTATIONAL_RESOURCES,
 									and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName))
 							)
@@ -438,15 +442,16 @@
 	 * Terminate EMR if it is spot.
 	 *
 	 * @param user              the user name.
+	 * @param project           name of project.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational.
 	 * @return <b>true</b> if computational is spot and should be terminated by docker, otherwise <b>false</b>.
 	 */
-	private boolean terminateComputationalSpot(String user, String exploratoryName, String computationalName) {
+	private boolean terminateComputationalSpot(String user, String project, String exploratoryName, String computationalName) {
 		LOGGER.trace("Check computatation is spot for user {} with exploratory {} and computational {}", user,
 				exploratoryName, computationalName);
 		Document doc = findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				and(elemMatch(COMPUTATIONAL_RESOURCES,
 						and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName),
 								eq(COMPUTATIONAL_SPOT, true),
@@ -471,7 +476,7 @@
 			ComputationalResourceAws computational = new ComputationalResourceAws();
 			SelfServiceApplication.getInjector().injectMembers(computational);
 			UserInfo ui = new UserInfo(user, accessToken);
-			computational.terminate(ui, exploratoryName, computationalName);
+			computational.terminate(ui, project, exploratoryName, computationalName);
 		} catch (Exception e) {
 			// Cannot terminate EMR, just update status to terminated
 			LOGGER.warn("Can't terminate computational for user {} with exploratory {} and computational {}. {}",
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
index 80a6b3c..b5ef6c2 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
@@ -61,7 +61,6 @@
 import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.set;
 import static java.util.stream.Collectors.toList;
-import static org.apache.commons.lang3.StringUtils.EMPTY;
 
 /**
  * DAO for user exploratory.
@@ -91,22 +90,18 @@
 		log.info("{} is initialized", getClass().getSimpleName());
 	}
 
-	static Bson exploratoryCondition(String user, String exploratoryName) {
-		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName));
+	static Bson exploratoryCondition(String user, String exploratoryName, String project) {
+		return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName), eq(PROJECT, project));
 	}
 
-	private Bson exploratoryStatusCondition(String user, UserInstanceStatus... exploratoryStatuses) {
-		return and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses)));
-	}
-
-	private static Bson runningExploratoryCondition(String user, String exploratoryName) {
-		return and(eq(USER, user),
+	private static Bson runningExploratoryCondition(String user, String exploratoryName, String project) {
+		return and(eq(USER, user), eq(PROJECT, project),
 				and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString())));
 	}
 
-	static Bson runningExploratoryAndComputationalCondition(String user, String exploratoryName, String
-			computationalName) {
-		return and(eq(USER, user),
+	static Bson runningExploratoryAndComputationalCondition(String user, String project, String exploratoryName,
+															String computationalName) {
+		return and(eq(USER, user), eq(PROJECT, project),
 				and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString()),
 						eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, computationalName),
 						eq(COMPUTATIONAL_RESOURCES + "." + STATUS, UserInstanceStatus.RUNNING.toString())));
@@ -125,20 +120,6 @@
 	}
 
 	/**
-	 * Finds and returns the unique id for exploratory.
-	 *
-	 * @param user            user name.
-	 * @param exploratoryName the name of exploratory.
-	 */
-	public String fetchExploratoryId(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
-				fields(include(EXPLORATORY_ID), excludeId()))
-				.orElse(new Document())
-				.getOrDefault(EXPLORATORY_ID, EMPTY).toString();
-	}
-
-	/**
 	 * Finds and returns the info of all user's running notebooks.
 	 *
 	 * @param user user name.
@@ -155,17 +136,18 @@
 		return getUserInstances(and(eq(PROJECT, project)), false);
 	}
 
+	public List<UserInstanceDTO> fetchExploratoryFieldsForProjectWithComp(String project) {
+		return getUserInstances(and(eq(PROJECT, project)), true);
+	}
+
+	public List<UserInstanceDTO> fetchExploratoryFieldsForProjectWithComp(List<String> projects) {
+		return getUserInstances(and(in(PROJECT, projects)), true);
+	}
+
 	public List<UserInstanceDTO> findExploratories(String project, String endpoint, String user) {
 		return getUserInstances(and(eq(PROJECT, project), eq(ENDPOINT, endpoint), eq(USER, user)), true);
 	}
 
-	/**
-	 * Finds and returns the info of all user's notebooks whose status is present among predefined ones.
-	 *
-	 * @param user                        user name.
-	 * @param computationalFieldsRequired true/false.
-	 * @param statuses                    array of statuses.
-	 */
 	public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusIn(String user, boolean computationalFieldsRequired,
 																	 UserInstanceStatus... statuses) {
 		final List<String> statusList = statusList(statuses);
@@ -228,22 +210,6 @@
 				false);
 	}
 
-	/**
-	 * Finds and returns the info of all user's notebooks whose status is absent among predefined ones.
-	 *
-	 * @param user     user name.
-	 * @param statuses array of statuses.
-	 */
-	public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusNotIn(String user, UserInstanceStatus... statuses) {
-		final List<String> statusList = statusList(statuses);
-		return getUserInstances(
-				and(
-						eq(USER, user),
-						not(in(STATUS, statusList))
-				),
-				false);
-	}
-
 	public List<UserInstanceDTO> fetchProjectExploratoriesWhereStatusNotIn(String project, String endpoint,
 																		   UserInstanceStatus... statuses) {
 		final List<String> statusList = statusList(statuses);
@@ -299,35 +265,25 @@
 	 * Finds and returns the info of exploratory (without info about computational resources).
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory.
 	 */
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName) {
-		return getExploratory(user, exploratoryName, false).orElseThrow(() ->
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName) {
+		return getExploratory(user, project, exploratoryName, false).orElseThrow(() ->
 				new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
 
 	}
 
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName,
-												  boolean includeComputationalResources) {
-		return getExploratory(user, exploratoryName, includeComputationalResources).orElseThrow(() ->
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName,
+												  boolean includeCompResources) {
+		return getExploratory(user, project, exploratoryName, includeCompResources).orElseThrow(() ->
 				new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
 
 	}
 
-	/**
-	 * Checks if exploratory exists.
-	 *
-	 * @param user            user name.
-	 * @param exploratoryName the name of exploratory.
-	 */
-	public boolean isExploratoryExist(String user, String exploratoryName) {
-		return getExploratory(user, exploratoryName, false).isPresent();
-	}
-
-	private Optional<UserInstanceDTO> getExploratory(String user, String exploratoryName,
+	private Optional<UserInstanceDTO> getExploratory(String user, String project, String exploratoryName,
 													 boolean includeCompResources) {
 		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				includeCompResources ? null : fields(exclude(COMPUTATIONAL_RESOURCES)),
 				UserInstanceDTO.class);
 	}
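
A brief usage sketch of the reworked, project-aware fetch methods (the DAO instance and argument values are placeholders):

    // Throws ResourceNotFoundException when no document matches user + project + exploratory.
    UserInstanceDTO withoutComp = exploratoryDAO.fetchExploratoryFields("jdoe", "proj1", "nb1");
    // Same lookup, but keeps the COMPUTATIONAL_RESOURCES subdocuments in the result.
    UserInstanceDTO withComp = exploratoryDAO.fetchExploratoryFields("jdoe", "proj1", "nb1", true);
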
@@ -336,12 +292,13 @@
 	 * Finds and returns the info of running exploratory with running cluster.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of cluster
 	 */
-	public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName, String computationalName) {
+	public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName, String computationalName) {
 		return findOne(USER_INSTANCES,
-				runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 				UserInstanceDTO.class)
 				.orElseThrow(() -> new DlabException(String.format("Running notebook %s with running cluster %s not " +
 								"found for user %s",
@@ -352,10 +309,11 @@
 	 * Finds and returns the info of running exploratory.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 */
-	public UserInstanceDTO fetchRunningExploratoryFields(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName),
+	public UserInstanceDTO fetchRunningExploratoryFields(String user, String project, String exploratoryName) {
+		return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName, project),
 				fields(exclude(COMPUTATIONAL_RESOURCES)), UserInstanceDTO.class)
 				.orElseThrow(() -> new DlabException(
 						String.format("Running exploratory instance for user %s with name %s not found.",
@@ -379,34 +337,22 @@
 	 */
 	public UpdateResult updateExploratoryStatus(StatusEnvBaseDTO<?> dto) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+				exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 				set(STATUS, dto.getStatus()));
 	}
 
 	/**
-	 * Updates the status for all user's corresponding exploratories in Mongo database.
-	 *
-	 * @param newExploratoryStatus   new status for exploratories.
-	 * @param user                   user name.
-	 * @param oldExploratoryStatuses old statuses of exploratories.
-	 */
-	public void updateStatusForExploratories(UserInstanceStatus newExploratoryStatus, String user,
-											 UserInstanceStatus... oldExploratoryStatuses) {
-		updateMany(USER_INSTANCES, exploratoryStatusCondition(user, oldExploratoryStatuses),
-				set(STATUS, newExploratoryStatus.toString()));
-	}
-
-	/**
 	 * Updates status for single exploratory in Mongo database.
 	 *
 	 * @param user            user.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @param newStatus       new status of exploratory.
 	 * @return The result of an update operation.
 	 */
-	public UpdateResult updateStatusForExploratory(String user, String exploratoryName, UserInstanceStatus newStatus) {
+	public UpdateResult updateStatusForExploratory(String user, String project, String exploratoryName, UserInstanceStatus newStatus) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(STATUS, newStatus.toString()));
 	}
 
@@ -414,40 +360,29 @@
 	 * Updates the scheduler's data for exploratory in Mongo database.
 	 *
 	 * @param user            user.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @param dto             object of scheduler data.
 	 * @return The result of an update operation.
 	 */
-	public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String exploratoryName,
+	public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String project, String exploratoryName,
 																 SchedulerJobDTO dto) {
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto)));
 	}
 
 	/**
-	 * Updates the requirement for reuploading key for all user's corresponding exploratories in Mongo database.
-	 *
-	 * @param user                user name.
-	 * @param reuploadKeyRequired true/false.
-	 * @param exploratoryStatuses statuses of exploratory.
-	 */
-	public void updateReuploadKeyForExploratories(String user, boolean reuploadKeyRequired,
-												  UserInstanceStatus... exploratoryStatuses) {
-		updateMany(USER_INSTANCES, exploratoryStatusCondition(user, exploratoryStatuses),
-				set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
-	}
-
-	/**
 	 * Updates the requirement for reuploading key for single exploratory in Mongo database.
 	 *
 	 * @param user                user name.
+	 * @param project             project name.
 	 * @param exploratoryName     exploratory's name
 	 * @param reuploadKeyRequired true/false.
 	 */
-	public void updateReuploadKeyForExploratory(String user, String exploratoryName, boolean reuploadKeyRequired) {
+	public void updateReuploadKeyForExploratory(String user, String project, String exploratoryName, boolean reuploadKeyRequired) {
 		updateOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
 	}
 
@@ -485,7 +420,7 @@
 							}
 					).collect(Collectors.toList()));
 		} else if (dto.getPrivateIp() != null) {
-			UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getExploratoryName());
+			UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getProject(), dto.getExploratoryName());
 			if (!inst.getPrivateIp().equals(dto.getPrivateIp()) && inst.getResourceUrl() != null) {
 				values.append(EXPLORATORY_URL, inst.getResourceUrl().stream()
 						.map(url -> replaceIp(dto.getPrivateIp(), inst, url))
@@ -506,13 +441,13 @@
 			values.append(CLUSTER_CONFIG, dto.getConfig().stream().map(this::convertToBson).collect(toList()));
 		}
 		return updateOne(USER_INSTANCES,
-				exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+				exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 				new Document(SET, values));
 	}
 
-	public void updateExploratoryIp(String user, String ip, String exploratoryName) {
+	public void updateExploratoryIp(String user, String project, String ip, String exploratoryName) {
 
-		UserInstanceDTO inst = fetchExploratoryFields(user, exploratoryName);
+		UserInstanceDTO inst = fetchExploratoryFields(user, project, exploratoryName);
 		if (!inst.getPrivateIp().equals(ip)) {
 			Document values = new Document();
 			values.append(EXPLORATORY_PRIVATE_IP, ip);
@@ -523,15 +458,15 @@
 			}
 
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					new Document(SET, values));
 		}
 
 	}
 
 	@SuppressWarnings("unchecked")
-	public List<ClusterConfig> getClusterConfig(String user, String exploratoryName) {
-		return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName), notNull(CLUSTER_CONFIG)),
+	public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName) {
+		return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project), notNull(CLUSTER_CONFIG)),
 				fields(include(CLUSTER_CONFIG), excludeId()))
 				.map(d -> convertFromDocument((List<Document>) d.get(CLUSTER_CONFIG),
 						new TypeReference<List<ClusterConfig>>() {
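
getClusterConfig relies on BaseDAO's convertFromDocument to map the raw CLUSTER_CONFIG subdocuments onto typed objects. That helper is outside this patch, so the following Jackson-based shape is an assumption, not the actual DLab implementation:

    import com.fasterxml.jackson.core.type.TypeReference;
    import com.fasterxml.jackson.databind.ObjectMapper;

    import org.bson.Document;

    import java.util.List;

    class DocumentConverter {
        private final ObjectMapper mapper = new ObjectMapper();

        // Re-maps already-parsed BSON documents onto the requested generic type.
        <T> T convertFromDocument(List<Document> documents, TypeReference<T> ref) {
            return mapper.convertValue(documents, ref);
        }
    }
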
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
index ebdd028..bcec258 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
@@ -38,11 +38,16 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.runningExploratoryAndComputationalCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.mongodb.client.model.Filters.and;
 import static com.mongodb.client.model.Filters.eq;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.elemMatch;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static com.mongodb.client.model.Updates.push;
 
 /**
@@ -98,17 +103,17 @@
 		return COMPUTATIONAL_LIBS + "." + computational + FIELD_SET_DELIMETER + fieldName;
 	}
 
-	private Document findLibraries(String user, String exploratoryName, Bson include) {
+	private Document findLibraries(String user, String project, String exploratoryName, Bson include) {
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				fields(excludeId(), include));
 
 		return opt.orElseGet(Document::new);
 
 	}
 
-	public List<Library> getLibraries(String user, String exploratoryName) {
-		final Document libsDocument = findAllLibraries(user, exploratoryName);
+	public List<Library> getLibraries(String user, String project, String exploratoryName) {
+		final Document libsDocument = findAllLibraries(user, project, exploratoryName);
 		return Stream
 				.concat(
 						libraryStream(libsDocument, exploratoryName, EXPLORATORY_LIBS, ResourceType.EXPLORATORY),
@@ -116,24 +121,23 @@
 				.collect(Collectors.toList());
 	}
 
-	public Document findAllLibraries(String user, String exploratoryName) {
-		return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
+	public Document findAllLibraries(String user, String project, String exploratoryName) {
+		return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
 				COMPUTATIONAL_RESOURCES));
 	}
 
-	public Document findExploratoryLibraries(String user, String exploratoryName) {
-		return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS));
+	public Document findExploratoryLibraries(String user, String project, String exploratoryName) {
+		return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS));
 	}
 
-	public Document findComputationalLibraries(String user, String exploratoryName, String computationalName) {
-		return findLibraries(user, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
+	public Document findComputationalLibraries(String user, String project, String exploratoryName, String computationalName) {
+		return findLibraries(user, project, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
 	}
 
 	@SuppressWarnings("unchecked")
-	public Library getLibrary(String user, String exploratoryName,
-							  String libraryGroup, String libraryName) {
+	public Library getLibrary(String user, String project, String exploratoryName, String libraryGroup, String libraryName) {
 		Optional<Document> userInstance = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(EXPLORATORY_LIBS,
 								and(eq(LIB_GROUP, libraryGroup), eq(LIB_NAME, libraryName))
 						)),
@@ -153,10 +157,10 @@
 	}
 
 	@SuppressWarnings("unchecked")
-	public Library getLibrary(String user, String exploratoryName, String computationalName,
+	public Library getLibrary(String user, String project, String exploratoryName, String computationalName,
 							  String libraryGroup, String libraryName) {
 		Optional<Document> libraryStatus = findOne(USER_INSTANCES,
-				and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 						libraryConditionComputational(computationalName, libraryGroup, libraryName)
 				),
 
@@ -184,18 +188,19 @@
 	 * Add the user's library for exploratory into database.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName name of exploratory.
 	 * @param library         library.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addLibrary(String user, String exploratoryName, LibInstallDTO library, boolean reinstall) {
+	public boolean addLibrary(String user, String project, String exploratoryName, LibInstallDTO library, boolean reinstall) {
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName),
+				and(exploratoryCondition(user, exploratoryName, project),
 						elemMatch(EXPLORATORY_LIBS,
 								and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))));
 		if (!opt.isPresent()) {
 			updateOne(USER_INSTANCES,
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					push(EXPLORATORY_LIBS, convertToBson(library)));
 			return true;
 		} else {
@@ -205,7 +210,7 @@
 				values.append(libraryFieldFilter(LIB_ERROR_MESSAGE), null);
 			}
 
-			updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+			updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
 					elemMatch(EXPLORATORY_LIBS,
 							and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))),
 					new Document(SET, values));
@@ -217,22 +222,23 @@
 	 * Add the user's library for exploratory into database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational.
 	 * @param library           library.
 	 * @return <b>true</b> if operation was successful, otherwise <b>false</b>.
 	 */
-	public boolean addLibrary(String user, String exploratoryName, String computationalName,
+	public boolean addLibrary(String user, String project, String exploratoryName, String computationalName,
 							  LibInstallDTO library, boolean reinstall) {
 
 		Optional<Document> opt = findOne(USER_INSTANCES,
-				and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+				and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 						eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
 						eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())));
 
 		if (!opt.isPresent()) {
 			updateOne(USER_INSTANCES,
-					runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+					runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
 					push(COMPUTATIONAL_LIBS + "." + computationalName, convertToBson(library)));
 			return true;
 		} else {
@@ -243,7 +249,7 @@
 			}
 
 			updateOne(USER_INSTANCES, and(
-					exploratoryCondition(user, exploratoryName),
+					exploratoryCondition(user, exploratoryName, project),
 					eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
 					eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())),
 
@@ -276,7 +282,7 @@
 				Document values = updateLibraryFields(lib, dto.getUptime());
 
 				updateOne(USER_INSTANCES,
-						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 								libraryConditionExploratory(lib.getGroup(), lib.getName())),
 						new Document(SET, values));
 			} catch (Exception e) {
@@ -292,7 +298,7 @@
 				Document values = updateComputationalLibraryFields(dto.getComputationalName(), lib, dto.getUptime());
 
 				updateOne(USER_INSTANCES,
-						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+						and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
 								elemMatch(COMPUTATIONAL_LIBS + "." + dto.getComputationalName(),
 										libCondition(lib.getGroup(), lib.getName()))),
 						new Document(SET, values));
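
Both addLibrary variants above follow the same check-then-write pattern: probe for an existing library entry, push a new subdocument if none exists, otherwise overwrite the matched entry's status fields. Condensed control flow for the exploratory variant (libraryFieldFilter presumably expands to the positional EXPLORATORY_LIBS + ".$." + field path; that helper is not shown in this hunk):

    // 1) Probe: does a library with this (group, name) already exist on the notebook?
    //      elemMatch(EXPLORATORY_LIBS, and(eq(LIB_GROUP, ...), eq(LIB_NAME, ...)))
    // 2a) Absent:  push(EXPLORATORY_LIBS, convertToBson(library)) and return true.
    // 2b) Present: $set the new status on the matched element, clearing
    //      LIB_ERROR_MESSAGE as well when reinstall == true.
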
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
index 3d3fb36..f6e8bb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
@@ -19,8 +19,8 @@
 
 package com.epam.dlab.backendapi.dao;
 
-import com.mongodb.BasicDBObject;
 import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
 import io.dropwizard.lifecycle.Managed;
 
 import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
@@ -28,12 +28,11 @@
 
 /** Creates the indexes for mongo collections. */
 public class IndexCreator extends BaseDAO implements Managed {
+    private static final String PROJECT_FIELD = "project";
     @Override
 	public void start() {
         mongoService.getCollection(USER_INSTANCES)
-        		.createIndex(new BasicDBObject(USER, 1)
-        		.append(EXPLORATORY_NAME, 2),
-                new IndexOptions().unique(true));
+                .createIndex(Indexes.ascending(USER, EXPLORATORY_NAME, PROJECT_FIELD), new IndexOptions().unique(true));
         // TODO: Make refactoring and append indexes for other mongo collections
     }
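
One note on the index change above: the old BasicDBObject spec used 2 as the key direction for EXPLORATORY_NAME, which is not a legal direction value (MongoDB 4.2+ rejects key specs other than 1, -1, and the named index types), so moving to Indexes.ascending also removes a latent failure. Assuming the constants map to the obvious field names:

    // Indexes.ascending(USER, EXPLORATORY_NAME, PROJECT_FIELD) produces the key document
    //     { "user": 1, "exploratory_name": 1, "project": 1 }
    // and, with new IndexOptions().unique(true), enforces uniqueness of the
    // (user, exploratory_name, project) triple across USER_INSTANCES.
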
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
index 245df5b..bce53c3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAO.java
@@ -12,8 +12,6 @@
 public interface ProjectDAO {
 	List<ProjectDTO> getProjects();
 
-	List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status);
-
 	List<ProjectDTO> getProjectsWithEndpointStatusNotIn(UserInstanceStatus... statuses);
 
 	List<ProjectDTO> getUserProjects(UserInfo userInfo, boolean active);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
index 7128c51..ac3cfa3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ProjectDAOImpl.java
@@ -18,7 +18,11 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.elemMatch;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
 
 public class ProjectDAOImpl extends BaseDAO implements ProjectDAO {
 
@@ -46,11 +50,6 @@
 	}
 
 	@Override
-	public List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status) {
-		return find(PROJECTS_COLLECTION, eq(STATUS_FIELD, status.toString()), ProjectDTO.class);
-	}
-
-	@Override
 	public List<ProjectDTO> getProjectsWithEndpointStatusNotIn(UserInstanceStatus... statuses) {
 		final List<String> statusList =
 				Arrays.stream(statuses).map(UserInstanceStatus::name).collect(Collectors.toList());
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
index 2fbb299..fc292dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
@@ -31,19 +31,35 @@
 import org.bson.Document;
 import org.bson.conversions.Bson;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 import static com.epam.dlab.backendapi.dao.ComputationalDAO.COMPUTATIONAL_NAME;
-import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
 import static com.epam.dlab.backendapi.dao.ComputationalDAO.IMAGE;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
 import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
 import static com.epam.dlab.dto.base.DataEngineType.fromDockerImageName;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
 import static java.util.stream.Collectors.toList;
 
 /**
@@ -78,12 +94,13 @@
 	 * Finds and returns the info of user's single scheduler job by exploratory name.
 	 *
 	 * @param user            user name.
+	 * @param project         project name.
 	 * @param exploratoryName the name of exploratory.
 	 * @return scheduler job data.
 	 */
-	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String exploratoryName) {
+	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String project,
+																				 String exploratoryName) {
 		return findOne(USER_INSTANCES,
-				and(exploratoryCondition(user, exploratoryName), schedulerNotNullCondition()),
+				and(exploratoryCondition(user, exploratoryName, project), schedulerNotNullCondition()),
 				fields(include(SCHEDULER_DATA), excludeId()))
 				.map(d -> convertFromDocument((Document) d.get(SCHEDULER_DATA), SchedulerJobDTO.class));
 	}
@@ -92,16 +109,17 @@
 	 * Finds and returns the info of user's single scheduler job for computational resource.
 	 *
 	 * @param user              user name.
+	 * @param project           project name.
 	 * @param exploratoryName   the name of exploratory.
 	 * @param computationalName the name of computational resource.
 	 * @return scheduler job data.
 	 */
 
 	@SuppressWarnings("unchecked")
-	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String exploratoryName,
+	public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String project, String exploratoryName,
 																	   String computationalName) {
 		return findOne(USER_INSTANCES,
-				exploratoryCondition(user, exploratoryName),
+				exploratoryCondition(user, exploratoryName, project),
 				fields(include(COMPUTATIONAL_RESOURCES), excludeId()))
 				.map(d -> (List<Document>) d.get(COMPUTATIONAL_RESOURCES))
 				.map(list -> list.stream().filter(d -> d.getString(COMPUTATIONAL_NAME).equals(computationalName))
@@ -140,7 +158,7 @@
 								eq(CONSIDER_INACTIVITY_FLAG, false)
 						)
 				),
-				fields(excludeId(), include(USER, EXPLORATORY_NAME, SCHEDULER_DATA))))
+				fields(excludeId(), include(USER, PROJECT, EXPLORATORY_NAME, SCHEDULER_DATA))))
 				.map(d -> convertFromDocument(d, SchedulerJobData.class))
 				.collect(toList());
 	}
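
A usage sketch for the project-aware scheduler lookups (instance and argument values are illustrative):

    Optional<SchedulerJobDTO> notebookJob =
            schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory("jdoe", "proj1", "nb1");
    Optional<SchedulerJobDTO> clusterJob =
            schedulerJobDAO.fetchSingleSchedulerJobForCluster("jdoe", "proj1", "nb1", "spark1");
    // Both come back empty when the matching instance carries no scheduler data.
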
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
index 0a4dde5..ae221f1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDao.java
@@ -25,8 +25,6 @@
 
 	void updateUsers(String group, Set<String> users);
 
-	void removeUser(String group, String user);
-
 	void removeGroup(String groupId);
 
 	Set<String> getUserGroups(String user);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
index 03a6f51..cc0da31 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserGroupDaoImpl.java
@@ -44,11 +44,6 @@
 	}
 
 	@Override
-	public void removeUser(String group, String user) {
-		updateOne(USER_GROUPS, eq(ID, group), pull(USERS_FIELD, user));
-	}
-
-	@Override
 	public void removeGroup(String groupId) {
 		deleteOne(USER_GROUPS, eq(ID, groupId));
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
index c2a401b..c9b5585 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDao.java
@@ -39,8 +39,6 @@
 
 	boolean addGroupToRole(Set<String> groups, Set<String> roleIds);
 
-	boolean removeGroupFromRole(Set<String> groups, Set<String> roleIds);
-
 	void removeGroupWhenRoleNotIn(String group, Set<String> roleIds);
 
 	void removeUnnecessaryRoles(CloudProvider cloudProviderToBeRemoved, List<CloudProvider> remainingProviders);
@@ -49,5 +47,5 @@
 
 	boolean removeGroup(String groupId);
 
-	List<UserGroupDto> aggregateRolesByGroup();
+	List<UserGroupDto> aggregateRolesByGroup(boolean isAdmin);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
index 0767be4..c256791 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
@@ -24,7 +24,9 @@
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.inject.Singleton;
+import com.mongodb.client.model.Aggregates;
 import com.mongodb.client.model.BsonField;
+import com.mongodb.client.model.Filters;
 import com.mongodb.client.result.UpdateResult;
 import org.bson.Document;
 import org.bson.conversions.Bson;
@@ -55,6 +57,8 @@
 	private static final String USERS_FIELD = "users";
 	private static final String GROUPS_FIELD = "groups";
 	private static final String DESCRIPTION = "description";
+	private static final String TYPE = "type";
+	private static final String CLOUD = "cloud";
 	private static final String ROLES = "roles";
 	private static final String GROUPS = "$groups";
 	private static final String GROUP = "group";
@@ -63,6 +67,7 @@
 	private static final String EXPLORATORIES_FIELD = "exploratories";
 	private static final String COMPUTATIONALS_FIELD = "computationals";
 	private static final String GROUP_INFO = "groupInfo";
+	private static final String ADMIN = "admin";
 
 
 	@Override
@@ -104,11 +109,6 @@
 	}
 
 	@Override
-	public boolean removeGroupFromRole(Set<String> groups, Set<String> roleIds) {
-		return conditionMatched(updateMany(MongoCollections.ROLES, in(ID, roleIds), pullAll(GROUPS_FIELD, groups)));
-	}
-
-	@Override
 	public void removeGroupWhenRoleNotIn(String group, Set<String> roleIds) {
 		updateMany(MongoCollections.ROLES, not(in(ID, roleIds)), pull(GROUPS_FIELD, group));
 	}
@@ -142,15 +142,19 @@
 	}
 
 	@Override
-	public List<UserGroupDto> aggregateRolesByGroup() {
+	public List<UserGroupDto> aggregateRolesByGroup(boolean isAdmin) {
 		final Document role = roleDocument();
 		final Bson groupBy = group(GROUPS, new BsonField(ROLES, new Document(ADD_TO_SET, role)));
 		final Bson lookup = lookup(USER_GROUPS, ID, ID, GROUP_INFO);
-		final List<Bson> pipeline = Arrays.asList(unwind(GROUPS), groupBy, lookup,
+		final List<Bson> pipeline = new ArrayList<>();
+		if (!isAdmin) {
+			pipeline.add(Aggregates.match(Filters.not(eq(ID, ADMIN))));
+		}
+		pipeline.addAll(Arrays.asList(unwind(GROUPS), groupBy, lookup,
 				project(new Document(GROUP, "$" + ID).append(GROUP_INFO, elementAt(GROUP_INFO, 0))
 						.append(ROLES, "$" + ROLES)),
 				project(new Document(GROUP, "$" + ID).append(USERS_FIELD, "$" + GROUP_INFO + "." + USERS_FIELD)
-						.append(ROLES, "$" + ROLES)));
+						.append(ROLES, "$" + ROLES))));
 
 		return stream(aggregate(MongoCollections.ROLES, pipeline))
 				.map(d -> convertFromDocument(d, UserGroupDto.class))
@@ -169,6 +173,8 @@
 	private Document roleDocument() {
 		return new Document().append(ID, "$" + ID)
 				.append(DESCRIPTION, "$" + DESCRIPTION)
+				.append(TYPE, "$" + TYPE)
+				.append(CLOUD, "$" + CLOUD)
 				.append(USERS_FIELD, "$" + USERS_FIELD)
 				.append(EXPLORATORY_SHAPES_FIELD, "$" + EXPLORATORY_SHAPES_FIELD)
 				.append(PAGES_FIELD, "$" + PAGES_FIELD)
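
For orientation, the aggregateRolesByGroup pipeline for a non-admin caller now looks roughly like this in shell notation (stage bodies abbreviated; the $group payload is the role document assembled above, and the $lookup target is the collection behind USER_GROUPS):

    // [ { $match: { _id: { $not: { $eq: "admin" } } } },
    //   { $unwind: "$groups" },
    //   { $group: { _id: "$groups", roles: { $addToSet: { ...role fields... } } } },
    //   { $lookup: { from: <USER_GROUPS>, localField: "_id", foreignField: "_id", as: "groupInfo" } },
    //   { $project: ... }, { $project: ... } ]
    // Admin callers skip the $match stage, so the "admin" role stays visible to them.
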
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java
deleted file mode 100644
index fde1d8f..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/aws/AwsBillingDAO.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.aws;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-import static com.epam.dlab.model.aws.ReportLine.FIELD_COST;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_CURRENCY_CODE;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_DLAB_ID;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_PRODUCT;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_RESOURCE_TYPE;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_USAGE_DATE;
-import static com.mongodb.client.model.Accumulators.max;
-import static com.mongodb.client.model.Accumulators.min;
-import static com.mongodb.client.model.Accumulators.sum;
-import static com.mongodb.client.model.Aggregates.group;
-import static com.mongodb.client.model.Aggregates.sort;
-
-/**
- * DAO for user billing.
- */
-public class AwsBillingDAO extends BaseBillingDAO {
-
-    public static final String DLAB_RESOURCE_TYPE = "dlab_resource_type";
-    public static final String USAGE_DATE_START = "from";
-    public static final String USAGE_DATE_END = "to";
-    public static final String TAG_RESOURCE_ID = "tag_resource_id";
-
-    @Override
-    protected Bson sortCriteria() {
-        return sort(new Document(ID + "." + USER, 1)
-                .append(ID + "." + FIELD_DLAB_ID, 1)
-                .append(ID + "." + DLAB_RESOURCE_TYPE, 1)
-                .append(ID + "." + FIELD_PRODUCT, 1));
-    }
-
-    @Override
-    protected Bson groupCriteria() {
-        return group(getGroupingFields(USER, FIELD_DLAB_ID, DLAB_RESOURCE_TYPE, FIELD_PRODUCT, FIELD_RESOURCE_TYPE,
-                FIELD_CURRENCY_CODE, FIELD_PROJECT),
-                sum(FIELD_COST, "$" + FIELD_COST),
-                min(MongoKeyWords.USAGE_FROM, "$" + FIELD_USAGE_DATE),
-                max(MongoKeyWords.USAGE_TO, "$" + FIELD_USAGE_DATE));
-    }
-
-    @Override
-    protected List<Bson> cloudMatchCriteria(BillingFilter filter) {
-        return Collections.emptyList();
-    }
-}
\ No newline at end of file
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java
deleted file mode 100644
index 8eeb52c..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/azure/AzureBillingDAO.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.billing.DlabResourceType;
-import com.google.inject.Singleton;
-import com.mongodb.client.model.Accumulators;
-import com.mongodb.client.model.Aggregates;
-import com.mongodb.client.model.Filters;
-import com.mongodb.client.model.Sorts;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-@Singleton
-@Slf4j
-public class AzureBillingDAO extends BaseBillingDAO {
-	public static final String SIZE = "size";
-
-	@Override
-	protected List<Bson> cloudMatchCriteria(BillingFilter filter) {
-		if (!filter.getProducts().isEmpty()) {
-			return Collections.singletonList(Filters.in(MongoKeyWords.METER_CATEGORY, filter.getProducts()));
-		} else {
-			return Collections.emptyList();
-		}
-	}
-
-	@Override
-	protected Bson groupCriteria() {
-		return Aggregates.group(getGroupingFields(
-				MongoKeyWords.DLAB_USER,
-				MongoKeyWords.DLAB_ID,
-				MongoKeyWords.RESOURCE_TYPE,
-				MongoKeyWords.METER_CATEGORY,
-				MongoKeyWords.CURRENCY_CODE,
-				FIELD_PROJECT),
-				Accumulators.sum(MongoKeyWords.COST, MongoKeyWords.prepend$(MongoKeyWords.COST)),
-				Accumulators.min(MongoKeyWords.USAGE_FROM, MongoKeyWords.prepend$(MongoKeyWords.USAGE_DAY)),
-				Accumulators.max(MongoKeyWords.USAGE_TO, MongoKeyWords.prepend$(MongoKeyWords.USAGE_DAY))
-		);
-	}
-
-	@Override
-	protected Bson sortCriteria() {
-		return Aggregates.sort(Sorts.ascending(
-				MongoKeyWords.prependId(MongoKeyWords.DLAB_USER),
-				MongoKeyWords.prependId(MongoKeyWords.DLAB_ID),
-				MongoKeyWords.prependId(MongoKeyWords.RESOURCE_TYPE),
-				MongoKeyWords.prependId(MongoKeyWords.METER_CATEGORY)));
-	}
-
-	@Override
-	protected String getServiceBaseName() {
-		return settings.getServiceBaseName().replace("_", "-").toLowerCase();
-	}
-
-	@Override
-	protected String shapeFieldName() {
-		return SIZE;
-	}
-
-	@Override
-	protected String dlabIdFieldName() {
-		return MongoKeyWords.DLAB_ID;
-	}
-
-	@Override
-	protected String productFieldName() {
-		return MongoKeyWords.METER_CATEGORY;
-	}
-
-	@Override
-	protected String costFieldName() {
-		return MongoKeyWords.COST_STRING;
-	}
-
-	@Override
-	protected String usageDateFromFieldName() {
-		return MongoKeyWords.USAGE_FROM;
-	}
-
-	@Override
-	protected String usageDateToFieldName() {
-		return MongoKeyWords.USAGE_TO;
-	}
-
-	@Override
-	protected String currencyCodeFieldName() {
-		return MongoKeyWords.CURRENCY_CODE;
-	}
-
-	@Override
-	protected String resourceType(Document id) {
-		return DlabResourceType.getResourceTypeName(id.getString(MongoKeyWords.RESOURCE_TYPE));
-	}
-
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java
deleted file mode 100644
index 1105066..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/gcp/GcpBillingDao.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.dao.gcp;
-
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-
-import java.util.Collections;
-import java.util.List;
-
-import static com.epam.dlab.MongoKeyWords.USAGE_FROM;
-import static com.epam.dlab.MongoKeyWords.USAGE_TO;
-import static com.epam.dlab.backendapi.dao.aws.AwsBillingDAO.DLAB_RESOURCE_TYPE;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_COST;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_DLAB_ID;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_PRODUCT;
-import static com.epam.dlab.model.aws.ReportLine.FIELD_USAGE_DATE;
-import static com.mongodb.client.model.Accumulators.max;
-import static com.mongodb.client.model.Accumulators.min;
-import static com.mongodb.client.model.Accumulators.sum;
-import static com.mongodb.client.model.Aggregates.group;
-import static com.mongodb.client.model.Aggregates.sort;
-
-public class GcpBillingDao extends BaseBillingDAO {
-    @Override
-    protected Bson sortCriteria() {
-        return sort(new Document(ID + "." + USER, 1)
-                .append(ID + "." + FIELD_DLAB_ID, 1)
-                .append(ID + "." + FIELD_PRODUCT, 1));
-    }
-
-    @Override
-    protected Bson groupCriteria() {
-        return group(getGroupingFields(USER, FIELD_DLAB_ID, DLAB_RESOURCE_TYPE, FIELD_PRODUCT,
-                currencyCodeFieldName(), FIELD_PROJECT),
-                sum(FIELD_COST, "$" + FIELD_COST),
-                min(USAGE_FROM, "$" + FIELD_USAGE_DATE),
-                max(USAGE_TO, "$" + FIELD_USAGE_DATE)
-        );
-    }
-
-    @Override
-    protected List<Bson> cloudMatchCriteria(BillingFilter filter) {
-        return Collections.emptyList();
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java
deleted file mode 100644
index 4a56034..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/BaseShape.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.AllArgsConstructor;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-import java.util.Map;
-
-@Data
-@AllArgsConstructor
-@NoArgsConstructor
-public class BaseShape implements ShapeFormat {
-    protected String shape;
-    protected UserInstanceStatus status;
-    protected Map<String, String> tags;
-
-    @Override
-    public String format() {
-        return shape;
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
index f7f89de..bfee5b3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
@@ -15,7 +15,7 @@
 	private final Set<String> groups;
 	@NotNull final Set<String> endpoints;
 	@NotNull
-	@Pattern(regexp = "^ssh-.*\\n?", message = "Wrong key format. Key should be in openSSH format")
+	@Pattern(regexp = "^ssh-.*\\n?", message = "format is incorrect. Please use the openSSH format")
 	private final String key;
 	@NotNull
 	private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java
deleted file mode 100644
index 73c0193..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineServiceShape.java
+++ /dev/null
@@ -1,37 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Map;
-
-
-@Slf4j
-public class DataEngineServiceShape extends BaseShape implements ShapeFormat {
-    private static final String DES_NAME_FORMAT = "Master: %s%sSlave:  %d x %s";
-    private String slaveCount;
-    private String slaveShape;
-
-    @Builder
-    public DataEngineServiceShape(String shape, UserInstanceStatus status, String slaveCount, String slaveShape,
-                                  Map<String, String> tags) {
-        super(shape, status, tags);
-        this.slaveCount = slaveCount;
-        this.slaveShape = slaveShape;
-    }
-
-    @Override
-    public String format() {
-        Integer count;
-        try {
-            count = Integer.valueOf(slaveCount);
-        } catch (NumberFormatException e) {
-            log.error("Cannot parse string {} to integer", slaveCount);
-            return StringUtils.EMPTY;
-        }
-        return String.format(DES_NAME_FORMAT, shape, System.lineSeparator(), count - 1, slaveShape);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java
deleted file mode 100644
index 8d4c003..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/DataEngineShape.java
+++ /dev/null
@@ -1,34 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.backendapi.service.ShapeFormat;
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Map;
-
-@Slf4j
-public class DataEngineShape extends BaseShape implements ShapeFormat {
-    private static final String DE_NAME_FORMAT = "%d x %s";
-    private String slaveCount;
-
-
-    @Builder
-    public DataEngineShape(String shape, UserInstanceStatus status, String slaveCount, Map<String, String> tags) {
-        super(shape, status, tags);
-        this.slaveCount = slaveCount;
-    }
-
-    @Override
-    public String format() {
-        Integer count;
-        try {
-            count = Integer.valueOf(slaveCount);
-        } catch (NumberFormatException e) {
-            log.error("Cannot parse string {} to integer", slaveCount);
-            return StringUtils.EMPTY;
-        }
-        return String.format(DE_NAME_FORMAT, count, shape);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
index 6b6e978..f288a68 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
@@ -33,11 +33,11 @@
 public class EndpointDTO {
 
 	private static final String URL_REGEXP_VALIDATION = "^(http(s)?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]";
-	@NotEmpty
-	@NotBlank
+	@NotBlank(message = "field cannot be empty")
 	private final String name;
-	@URL(regexp = URL_REGEXP_VALIDATION, message = "endpoint field is in improper format!")
+	@URL(regexp = URL_REGEXP_VALIDATION, message = "field is in improper format!")
 	private final String url;
+	@NotBlank(message = "field cannot be empty")
 	private final String account;
 	@JsonProperty("endpoint_tag")
 	private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java
deleted file mode 100644
index 5f41cad..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointShape.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Collections;
-
-public class EndpointShape extends BaseShape {
-
-    @Builder
-    public EndpointShape(String shape, UserInstanceStatus status) {
-        super(shape, status, Collections.emptyMap());
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java
deleted file mode 100644
index 74ceab0..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ExploratoryShape.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Map;
-
-public class ExploratoryShape extends BaseShape {
-
-    @Builder
-    public ExploratoryShape(String shape, UserInstanceStatus status, Map<String, String> tags) {
-        super(shape, status, tags);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
index eb53fc0..9cd0a35 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
@@ -21,7 +21,7 @@
 	@NotNull
 	private final Set<String> groups;
 	@NotNull
-	@Pattern(regexp = "^ssh-.*\\n", message = "Wrong key format. Key should be in openSSH format")
+	@Pattern(regexp = "^ssh-.*\\n", message = "format is incorrect. Please use the openSSH format")
 	private final String key;
 	@NotNull
 	private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java
deleted file mode 100644
index 167128e..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectManagingDTO.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import lombok.AllArgsConstructor;
-import lombok.Data;
-
-
-@Data
-@JsonIgnoreProperties(ignoreUnknown = true)
-@AllArgsConstructor
-public class ProjectManagingDTO {
-    private String name;
-    private final Integer budget;
-    private boolean canBeStopped;
-    private boolean canBeTerminated;
-}
\ No newline at end of file
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java
deleted file mode 100644
index a38a99e..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/SsnShape.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.epam.dlab.backendapi.domain;
-
-import com.epam.dlab.dto.UserInstanceStatus;
-import lombok.Builder;
-
-import java.util.Collections;
-
-public class SsnShape extends BaseShape {
-
-    @Builder
-    public SsnShape(String shape, UserInstanceStatus status) {
-        super(shape, status, Collections.emptyMap());
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java
new file mode 100644
index 0000000..a536dab
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/interceptor/ProjectAdminInterceptor.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.interceptor;
+
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.User;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.exceptions.DlabException;
+import com.epam.dlab.exceptions.ResourceQuoteReachedException;
+import com.google.inject.Inject;
+import lombok.extern.slf4j.Slf4j;
+import org.aopalliance.intercept.MethodInterceptor;
+import org.aopalliance.intercept.MethodInvocation;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Parameter;
+import java.util.Objects;
+import java.util.stream.IntStream;
+
+@Slf4j
+public class ProjectAdminInterceptor implements MethodInterceptor {
+    @Inject
+    private ProjectService projectService;
+
+    @Override
+    public Object invoke(MethodInvocation mi) throws Throwable {
+        if (grantAccess(mi)) {
+            return mi.proceed();
+        } else {
+            final Method method = mi.getMethod();
+            log.warn("Execution of method {} failed because user doesn't have appropriate permission", method.getName());
+            throw new ResourceQuoteReachedException("Operation can not be finished. User doesn't have appropriate permission");
+        }
+    }
+
+    private boolean grantAccess(MethodInvocation mi) {
+        final Parameter[] parameters = mi.getMethod().getParameters();
+        String project = IntStream.range(0, parameters.length)
+                .filter(i -> Objects.nonNull(parameters[i].getAnnotation(Project.class)))
+                .mapToObj(i -> (String) mi.getArguments()[i])
+                .findAny()
+                .orElseThrow(() -> new DlabException("Project parameter wanted!"));
+        UserInfo userInfo = IntStream.range(0, parameters.length)
+                .filter(i -> Objects.nonNull(parameters[i].getAnnotation(User.class)))
+                .mapToObj(i -> (UserInfo) mi.getArguments()[i])
+                .findAny()
+                .orElseThrow(() -> new DlabException("UserInfo parameter wanted!"));
+
+        return checkPermission(userInfo, project);
+    }
+
+    private boolean checkPermission(UserInfo userInfo, String project) {
+        return UserRoles.isAdmin(userInfo) || UserRoles.isProjectAdmin(userInfo, projectService.get(project).getGroups());
+    }
+}
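
ProjectAdminInterceptor is an aopalliance MethodInterceptor, so it only takes effect once bound against a marker annotation in a Guice module, and this patch does not show that wiring. A plausible sketch, mirroring the BudgetLimited/BudgetLimitInterceptor pattern in the module removed just below; the @ProjectAdmin annotation name is an assumption. Note also that the interceptor reuses ResourceQuoteReachedException to signal a permission failure:

    import com.google.inject.AbstractModule;

    import static com.google.inject.matcher.Matchers.annotatedWith;
    import static com.google.inject.matcher.Matchers.any;

    public class ProjectAdminModule extends AbstractModule {
        @Override
        protected void configure() {
            // Hypothetical: @ProjectAdmin marks methods restricted to project admins.
            final ProjectAdminInterceptor interceptor = new ProjectAdminInterceptor();
            requestInjection(interceptor); // injects ProjectService into the interceptor
            bindInterceptor(any(), annotatedWith(ProjectAdmin.class), interceptor);
        }
    }
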
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java
deleted file mode 100644
index 0fd45de..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AwsSelfServiceModule.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.modules;
-
-import com.epam.dlab.backendapi.SelfServiceApplication;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.aws.AwsBillingService;
-import com.epam.dlab.cloud.CloudModule;
-import com.epam.dlab.mongo.MongoServiceFactory;
-import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
-import com.google.inject.Injector;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-import io.dropwizard.setup.Environment;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerException;
-import org.quartz.impl.StdSchedulerFactory;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-public class AwsSelfServiceModule extends CloudModule {
-
-	private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
-	private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
-	private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
-
-	@Override
-	protected void configure() {
-		bind(BillingService.class).to(AwsBillingService.class);
-		bind(SchedulerConfiguration.class).toInstance(
-				new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
-		bind(BillingDAO.class).to(AwsBillingDAO.class);
-		final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
-		requestInjection(budgetLimitInterceptor);
-		bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
-	}
-
-	@Override
-	public void init(Environment environment, Injector injector) {
-		environment.jersey().register(injector.getInstance(ComputationalResourceAws.class));
-//
-
-		/*injector.getInstance(SecurityFactory.class).configure(injector, environment,
-				SelfServiceSecurityAuthenticator.class, injector.getInstance(Authorizer.class));*/
-	}
-
-
-	@Provides
-	@Singleton
-	Scheduler provideScheduler(SelfServiceApplicationConfiguration configuration) throws SchedulerException {
-		final MongoServiceFactory mongoFactory = configuration.getMongoFactory();
-		final String database = mongoFactory.getDatabase();
-		final String mongoUri = String.format(MONGO_URI_FORMAT, mongoFactory.getUsername(), mongoFactory.getPassword(),
-				mongoFactory.getHost(), mongoFactory.getPort(), database);
-		System.setProperty(QUARTZ_MONGO_URI_PROPERTY, mongoUri);
-		System.setProperty(QUARTZ_DB_NAME, database);
-		return StdSchedulerFactory.getDefaultScheduler();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java
deleted file mode 100644
index ee04041..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/AzureSelfServiceModule.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.modules;
-
-import com.epam.dlab.backendapi.SelfServiceApplication;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureBillingDAO;
-import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.azure.ComputationalResourceAzure;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.azure.AzureBillingService;
-import com.epam.dlab.cloud.CloudModule;
-import com.epam.dlab.mongo.MongoServiceFactory;
-import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
-import com.google.inject.Injector;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-import io.dropwizard.setup.Environment;
-import lombok.extern.slf4j.Slf4j;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerException;
-import org.quartz.impl.StdSchedulerFactory;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-@Slf4j
-public class AzureSelfServiceModule extends CloudModule {
-
-	private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
-	private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
-	private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
-
-	@Override
-	protected void configure() {
-		bind(BillingService.class).to(AzureBillingService.class);
-		bind(SchedulerConfiguration.class).toInstance(
-				new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
-		bind(BillingDAO.class).to(AzureBillingDAO.class);
-		final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
-		requestInjection(budgetLimitInterceptor);
-		bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
-	}
-
-	@Override
-	public void init(Environment environment, Injector injector) {
-		environment.jersey().register(injector.getInstance(ComputationalResourceAzure.class));
-
-	}
-
-	@Provides
-	@Singleton
-	Scheduler provideScheduler(SelfServiceApplicationConfiguration configuration) throws SchedulerException {
-		final MongoServiceFactory mongoFactory = configuration.getMongoFactory();
-		final String database = mongoFactory.getDatabase();
-		final String mongoUri = String.format(MONGO_URI_FORMAT, mongoFactory.getUsername(), mongoFactory.getPassword(),
-				mongoFactory.getHost(), mongoFactory.getPort(), database);
-		System.setProperty(QUARTZ_MONGO_URI_PROPERTY, mongoUri);
-		System.setProperty(QUARTZ_DB_NAME, database);
-		return StdSchedulerFactory.getDefaultScheduler();
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
index 0ee0d10..8b41baf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/CloudProviderModule.java
@@ -21,25 +21,19 @@
 
 import com.epam.dlab.backendapi.SelfServiceApplication;
 import com.epam.dlab.backendapi.annotation.BudgetLimited;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureBillingDAO;
-import com.epam.dlab.backendapi.dao.gcp.GcpBillingDao;
 import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
+import com.epam.dlab.backendapi.interceptor.ProjectAdminInterceptor;
 import com.epam.dlab.backendapi.resources.BillingResource;
 import com.epam.dlab.backendapi.resources.aws.ComputationalResourceAws;
 import com.epam.dlab.backendapi.resources.azure.ComputationalResourceAzure;
 import com.epam.dlab.backendapi.resources.gcp.ComputationalResourceGcp;
 import com.epam.dlab.backendapi.resources.gcp.GcpOauthResource;
 import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.BillingServiceNew;
 import com.epam.dlab.backendapi.service.InfrastructureInfoService;
 import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
-import com.epam.dlab.backendapi.service.aws.AwsBillingService;
-import com.epam.dlab.backendapi.service.azure.AzureBillingService;
-import com.epam.dlab.backendapi.service.gcp.GcpBillingService;
-import com.epam.dlab.backendapi.service.impl.BillingServiceImplNew;
+import com.epam.dlab.backendapi.service.impl.BillingServiceImpl;
 import com.epam.dlab.backendapi.service.impl.InfrastructureInfoServiceImpl;
 import com.epam.dlab.backendapi.service.impl.InfrastructureTemplateServiceImpl;
 import com.epam.dlab.cloud.CloudModule;
@@ -70,16 +64,18 @@
 
     @Override
     protected void configure() {
-        bindBilling();
-        bind(BillingServiceNew.class).to(BillingServiceImplNew.class);
+        bind(BillingService.class).to(BillingServiceImpl.class);
         bind(InfrastructureInfoService.class).to(InfrastructureInfoServiceImpl.class);
         bind(InfrastructureTemplateService.class).to(InfrastructureTemplateServiceImpl.class);
         bind(SchedulerConfiguration.class).toInstance(
                 new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
 
         final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
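+        // Restrict @ProjectAdmin-annotated methods to DLab admins and project admins (see ProjectAdminInterceptor).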
+        final ProjectAdminInterceptor projectAdminInterceptor = new ProjectAdminInterceptor();
         requestInjection(budgetLimitInterceptor);
+        requestInjection(projectAdminInterceptor);
         bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
+        bindInterceptor(any(), annotatedWith(ProjectAdmin.class), projectAdminInterceptor);
     }
 
     @Override
@@ -104,23 +100,4 @@
         System.setProperty(QUARTZ_DB_NAME, database);
         return StdSchedulerFactory.getDefaultScheduler();
     }
-
-    private void bindBilling() {
-        switch (configuration.getCloudProvider()) {
-            case AWS:
-                bind(BillingService.class).to(AwsBillingService.class);
-                bind(BillingDAO.class).to(AwsBillingDAO.class);
-                break;
-            case AZURE:
-                bind(BillingService.class).to(AzureBillingService.class);
-                bind(BillingDAO.class).to(AzureBillingDAO.class);
-                break;
-            case GCP:
-                bind(BillingService.class).to(GcpBillingService.class);
-                bind(BillingDAO.class).to(GcpBillingDao.class);
-                break;
-            default:
-                throw new UnsupportedOperationException("Unsupported cloud provider " + configuration.getCloudProvider());
-        }
-    }
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
index cf08d12..3c00111 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/DevModule.java
@@ -23,9 +23,66 @@
 import com.epam.dlab.auth.contract.SecurityAPI;
 import com.epam.dlab.backendapi.auth.SelfServiceSecurityAuthorizer;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.*;
-import com.epam.dlab.backendapi.service.*;
-import com.epam.dlab.backendapi.service.impl.*;
+import com.epam.dlab.backendapi.dao.BackupDao;
+import com.epam.dlab.backendapi.dao.BackupDaoImpl;
+import com.epam.dlab.backendapi.dao.BaseBillingDAO;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAOImpl;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDaoImpl;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
+import com.epam.dlab.backendapi.dao.ProjectDAOImpl;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.dao.UserGroupDaoImpl;
+import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.dao.UserRoleDaoImpl;
+import com.epam.dlab.backendapi.service.AccessKeyService;
+import com.epam.dlab.backendapi.service.ApplicationSettingService;
+import com.epam.dlab.backendapi.service.ApplicationSettingServiceImpl;
+import com.epam.dlab.backendapi.service.BackupService;
+import com.epam.dlab.backendapi.service.ComputationalService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.EnvironmentService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ExternalLibraryService;
+import com.epam.dlab.backendapi.service.GitCredentialService;
+import com.epam.dlab.backendapi.service.GuacamoleService;
+import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.InactivityService;
+import com.epam.dlab.backendapi.service.KeycloakService;
+import com.epam.dlab.backendapi.service.KeycloakServiceImpl;
+import com.epam.dlab.backendapi.service.LibraryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.ReuploadKeyService;
+import com.epam.dlab.backendapi.service.SchedulerJobService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.epam.dlab.backendapi.service.SecurityServiceImpl;
+import com.epam.dlab.backendapi.service.SystemInfoService;
+import com.epam.dlab.backendapi.service.TagService;
+import com.epam.dlab.backendapi.service.TagServiceImpl;
+import com.epam.dlab.backendapi.service.UserGroupService;
+import com.epam.dlab.backendapi.service.UserRoleService;
+import com.epam.dlab.backendapi.service.UserRoleServiceImpl;
+import com.epam.dlab.backendapi.service.UserSettingService;
+import com.epam.dlab.backendapi.service.UserSettingServiceImpl;
+import com.epam.dlab.backendapi.service.impl.AccessKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.BackupServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ComputationalServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EndpointServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EnvironmentServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GitCredentialServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GuacamoleServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ImageExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InactivityServiceImpl;
+import com.epam.dlab.backendapi.service.impl.LibraryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.MavenCentralLibraryService;
+import com.epam.dlab.backendapi.service.impl.ProjectServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ReuploadKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SystemInfoServiceImpl;
+import com.epam.dlab.backendapi.service.impl.UserGroupServiceImpl;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.mongo.MongoService;
 import com.epam.dlab.rest.client.RESTService;
@@ -106,6 +163,7 @@
 		bind(EndpointDAO.class).to(EndpointDAOImpl.class);
 		bind(ProjectService.class).to(ProjectServiceImpl.class);
 		bind(ProjectDAO.class).to(ProjectDAOImpl.class);
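+		// Billing is now cloud-agnostic: a single BaseBillingDAO replaces the per-provider DAO bindings.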
+		bind(BillingDAO.class).to(BaseBillingDAO.class);
 	}
 
 	private void configureCors(Environment environment) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java
deleted file mode 100644
index 276238e..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/GcpSelfServiceModule.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.modules;
-
-import com.epam.dlab.backendapi.SelfServiceApplication;
-import com.epam.dlab.backendapi.annotation.BudgetLimited;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.gcp.GcpBillingDao;
-import com.epam.dlab.backendapi.interceptor.BudgetLimitInterceptor;
-import com.epam.dlab.backendapi.resources.gcp.ComputationalResourceGcp;
-import com.epam.dlab.backendapi.resources.gcp.GcpOauthResource;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.gcp.GcpBillingService;
-import com.epam.dlab.cloud.CloudModule;
-import com.epam.dlab.mongo.MongoServiceFactory;
-import com.fiestacabin.dropwizard.quartz.SchedulerConfiguration;
-import com.google.inject.Injector;
-import com.google.inject.Provides;
-import com.google.inject.Singleton;
-import io.dropwizard.setup.Environment;
-import org.quartz.Scheduler;
-import org.quartz.SchedulerException;
-import org.quartz.impl.StdSchedulerFactory;
-
-import static com.google.inject.matcher.Matchers.annotatedWith;
-import static com.google.inject.matcher.Matchers.any;
-
-public class GcpSelfServiceModule extends CloudModule {
-
-    private static final String MONGO_URI_FORMAT = "mongodb://%s:%s@%s:%d/%s";
-    private static final String QUARTZ_MONGO_URI_PROPERTY = "org.quartz.jobStore.mongoUri";
-    private static final String QUARTZ_DB_NAME = "org.quartz.jobStore.dbName";
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public void init(Environment environment, Injector injector) {
-		environment.jersey().register(injector.getInstance(ComputationalResourceGcp.class));
-		if (injector.getInstance(SelfServiceApplicationConfiguration.class).isGcpOuauth2AuthenticationEnabled()) {
-			environment.jersey().register(injector.getInstance(GcpOauthResource.class));
-		}
-
-    }
-
-    @Override
-    protected void configure() {
-        bind(BillingService.class).to(GcpBillingService.class);
-        bind(BillingDAO.class).to(GcpBillingDao.class);
-        bind(SchedulerConfiguration.class).toInstance(
-                new SchedulerConfiguration(SelfServiceApplication.class.getPackage().getName()));
-        final BudgetLimitInterceptor budgetLimitInterceptor = new BudgetLimitInterceptor();
-        requestInjection(budgetLimitInterceptor);
-        bindInterceptor(any(), annotatedWith(BudgetLimited.class), budgetLimitInterceptor);
-    }
-
-    @Provides
-    @Singleton
-    Scheduler provideScheduler(SelfServiceApplicationConfiguration configuration) throws SchedulerException {
-        final MongoServiceFactory mongoFactory = configuration.getMongoFactory();
-        final String database = mongoFactory.getDatabase();
-        final String mongoUri = String.format(MONGO_URI_FORMAT, mongoFactory.getUsername(), mongoFactory.getPassword(),
-                mongoFactory.getHost(), mongoFactory.getPort(), database);
-        System.setProperty(QUARTZ_MONGO_URI_PROPERTY, mongoUri);
-        System.setProperty(QUARTZ_DB_NAME, database);
-        return StdSchedulerFactory.getDefaultScheduler();
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
index 1480fe7..eb8d3bc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ModuleFactory.java
@@ -48,17 +48,4 @@
 	public static CloudModule getCloudProviderModule(SelfServiceApplicationConfiguration configuration) {
 		return new CloudProviderModule(configuration);
 	}
-
-	private static CloudModule getCloudModule(SelfServiceApplicationConfiguration configuration) {
-		switch (configuration.getCloudProvider()) {
-			case AWS:
-				return new AwsSelfServiceModule();
-			case AZURE:
-				return new AzureSelfServiceModule();
-			case GCP:
-				return new GcpSelfServiceModule();
-			default:
-				throw new UnsupportedOperationException("Unsupported cloud provider " + configuration.getCloudProvider());
-		}
-	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
index 0d0ae1d..ce7ac26 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/modules/ProductionModule.java
@@ -20,11 +20,68 @@
 package com.epam.dlab.backendapi.modules;
 
 import com.epam.dlab.ModuleBase;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
 import com.epam.dlab.backendapi.auth.SelfServiceSecurityAuthorizer;
-import com.epam.dlab.backendapi.dao.*;
-import com.epam.dlab.backendapi.service.*;
-import com.epam.dlab.backendapi.service.impl.*;
+import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.BackupDao;
+import com.epam.dlab.backendapi.dao.BackupDaoImpl;
+import com.epam.dlab.backendapi.dao.BaseBillingDAO;
+import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAO;
+import com.epam.dlab.backendapi.dao.EndpointDAOImpl;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDaoImpl;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
+import com.epam.dlab.backendapi.dao.ProjectDAOImpl;
+import com.epam.dlab.backendapi.dao.UserGroupDao;
+import com.epam.dlab.backendapi.dao.UserGroupDaoImpl;
+import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.dao.UserRoleDaoImpl;
+import com.epam.dlab.backendapi.service.AccessKeyService;
+import com.epam.dlab.backendapi.service.ApplicationSettingService;
+import com.epam.dlab.backendapi.service.ApplicationSettingServiceImpl;
+import com.epam.dlab.backendapi.service.BackupService;
+import com.epam.dlab.backendapi.service.ComputationalService;
+import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.EnvironmentService;
+import com.epam.dlab.backendapi.service.ExploratoryService;
+import com.epam.dlab.backendapi.service.ExternalLibraryService;
+import com.epam.dlab.backendapi.service.GitCredentialService;
+import com.epam.dlab.backendapi.service.GuacamoleService;
+import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.InactivityService;
+import com.epam.dlab.backendapi.service.KeycloakService;
+import com.epam.dlab.backendapi.service.KeycloakServiceImpl;
+import com.epam.dlab.backendapi.service.LibraryService;
+import com.epam.dlab.backendapi.service.ProjectService;
+import com.epam.dlab.backendapi.service.ReuploadKeyService;
+import com.epam.dlab.backendapi.service.SchedulerJobService;
+import com.epam.dlab.backendapi.service.SecurityService;
+import com.epam.dlab.backendapi.service.SecurityServiceImpl;
+import com.epam.dlab.backendapi.service.SystemInfoService;
+import com.epam.dlab.backendapi.service.TagService;
+import com.epam.dlab.backendapi.service.TagServiceImpl;
+import com.epam.dlab.backendapi.service.UserGroupService;
+import com.epam.dlab.backendapi.service.UserRoleService;
+import com.epam.dlab.backendapi.service.UserRoleServiceImpl;
+import com.epam.dlab.backendapi.service.UserSettingService;
+import com.epam.dlab.backendapi.service.UserSettingServiceImpl;
+import com.epam.dlab.backendapi.service.impl.AccessKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.BackupServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ComputationalServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EndpointServiceImpl;
+import com.epam.dlab.backendapi.service.impl.EnvironmentServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GitCredentialServiceImpl;
+import com.epam.dlab.backendapi.service.impl.GuacamoleServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ImageExploratoryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.InactivityServiceImpl;
+import com.epam.dlab.backendapi.service.impl.LibraryServiceImpl;
+import com.epam.dlab.backendapi.service.impl.MavenCentralLibraryService;
+import com.epam.dlab.backendapi.service.impl.ProjectServiceImpl;
+import com.epam.dlab.backendapi.service.impl.ReuploadKeyServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl;
+import com.epam.dlab.backendapi.service.impl.SystemInfoServiceImpl;
+import com.epam.dlab.backendapi.service.impl.UserGroupServiceImpl;
 import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.mongo.MongoService;
 import com.epam.dlab.rest.client.RESTService;
@@ -95,6 +152,7 @@
 		bind(EndpointDAO.class).to(EndpointDAOImpl.class);
 		bind(ProjectService.class).to(ProjectServiceImpl.class);
 		bind(ProjectDAO.class).to(ProjectDAOImpl.class);
+		bind(BillingDAO.class).to(BaseBillingDAO.class);
 		bind(TagService.class).to(TagServiceImpl.class);
 		bind(SecurityService.class).to(SecurityServiceImpl.class);
 		bind(KeycloakService.class).to(KeycloakServiceImpl.class);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
index 54e3695..1916a38 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/BillingResource.java
@@ -22,7 +22,6 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
 import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.service.BillingServiceNew;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
 
@@ -41,43 +40,24 @@
 public class BillingResource {
 
     private final BillingService billingService;
-    private final BillingServiceNew billingServiceNew;
 
     @Inject
-    public BillingResource(BillingService billingService, BillingServiceNew billingServiceNew) {
+    public BillingResource(BillingService billingService) {
         this.billingService = billingService;
-        this.billingServiceNew = billingServiceNew;
     }
 
-//    @POST
-//    @Path("/report")
-//    @Produces(MediaType.APPLICATION_JSON)
-//    public Document getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter formDTO) {
-//        return billingService.getBillingReport(userInfo, formDTO);
-//    }
-//
-//    @POST
-//    @Path("/report/download")
-//    @Produces(MediaType.APPLICATION_OCTET_STREAM)
-//    public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter formDTO) {
-//        return Response.ok(billingService.downloadReport(userInfo, formDTO))
-//                .header(HttpHeaders.CONTENT_DISPOSITION,
-//                        "attachment; filename=\"" + billingService.getReportFileName(userInfo, formDTO) + "\"")
-//                .build();
-//    }
-
     @POST
     @Path("/report")
     @Produces(MediaType.APPLICATION_JSON)
     public Response getBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter filter) {
-        return Response.ok(billingServiceNew.getBillingReport(userInfo, filter)).build();
+        return Response.ok(billingService.getBillingReport(userInfo, filter)).build();
     }
 
     @POST
     @Path("/report/download")
     @Produces(MediaType.APPLICATION_OCTET_STREAM)
     public Response downloadBillingReport(@Auth UserInfo userInfo, @Valid @NotNull BillingFilter filter) {
-        return Response.ok(billingServiceNew.downloadReport(userInfo, filter))
+        return Response.ok(billingService.downloadReport(userInfo, filter))
                 .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"billing-report.csv\"")
                 .build();
     }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
index f227f3f..3553ff4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
@@ -27,7 +27,12 @@
 import org.hibernate.validator.constraints.NotEmpty;
 
 import javax.annotation.security.RolesAllowed;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
@@ -44,77 +49,62 @@
 	}
 
 	@GET
-	@Path("user")
-	@Produces(MediaType.APPLICATION_JSON)
-	public Response getUsersWithActiveEnv(@Auth UserInfo userInfo) {
-		log.debug("User {} requested information about active environments", userInfo.getName());
-		return Response.ok(environmentService.getUsers()).build();
-	}
-
-	@GET
 	@Path("all")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response getAllEnv(@Auth UserInfo userInfo) {
 		log.debug("Admin {} requested information about all user's environment", userInfo.getName());
-		return Response.ok(environmentService.getAllEnv()).build();
+		return Response.ok(environmentService.getAllEnv(userInfo)).build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop")
-	public Response stopEnv(@Auth UserInfo userInfo,
-							@NotEmpty String user) {
-		log.info("User {} is stopping {} environment", userInfo.getName(), user);
-		environmentService.stopEnvironment(userInfo, user);
-		return Response.ok().build();
-	}
-
-	@POST
-	@Consumes(MediaType.TEXT_PLAIN)
-	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop/{exploratoryName}")
+	@Path("stop/{projectName}/{exploratoryName}")
 	public Response stopNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+								 @PathParam("projectName") String projectName,
 								 @PathParam("exploratoryName") String exploratoryName) {
 		log.info("Admin {} is stopping notebook {} of user {}", userInfo.getName(), exploratoryName, user);
-		environmentService.stopExploratory(userInfo, user, exploratoryName);
+		environmentService.stopExploratory(userInfo, user, projectName, exploratoryName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("stop/{exploratoryName}/{computationalName}")
+	@Path("stop/{projectName}/{exploratoryName}/{computationalName}")
 	public Response stopCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+								@PathParam("projectName") String projectName,
 								@PathParam("exploratoryName") String exploratoryName,
 								@PathParam("computationalName") String computationalName) {
 		log.info("Admin {} is stopping computational resource {} affiliated with exploratory {} of user {}",
 				userInfo.getName(), computationalName, exploratoryName, user);
-		environmentService.stopComputational(userInfo, user, exploratoryName, computationalName);
+		environmentService.stopComputational(userInfo, user, projectName, exploratoryName, computationalName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("terminate/{exploratoryName}")
+	@Path("terminate/{projectName}/{exploratoryName}")
 	public Response terminateNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+									  @PathParam("projectName") String projectName,
 									  @PathParam("exploratoryName") String exploratoryName) {
 		log.info("Admin {} is terminating notebook {} of user {}", userInfo.getName(), exploratoryName, user);
-		environmentService.terminateExploratory(userInfo, user, exploratoryName);
+		environmentService.terminateExploratory(userInfo, user, projectName, exploratoryName);
 		return Response.ok().build();
 	}
 
 	@POST
 	@Consumes(MediaType.TEXT_PLAIN)
 	@Produces(MediaType.APPLICATION_JSON)
-	@Path("terminate/{exploratoryName}/{computationalName}")
+	@Path("terminate/{projectName}/{exploratoryName}/{computationalName}")
 	public Response terminateCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
 		log.info("Admin {} is terminating computational resource {} affiliated with exploratory {} of user {}",
 				userInfo.getName(), computationalName, exploratoryName, user);
-		environmentService.terminateComputational(userInfo, user, exploratoryName, computationalName);
+		environmentService.terminateComputational(userInfo, user, projectName, exploratoryName, computationalName);
 		return Response.ok().build();
 	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
index cb2c7d2..7b29af1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
@@ -37,7 +37,14 @@
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -109,11 +116,12 @@
 	 * @return Invocation response as JSON string.
 	 */
 	@DELETE
-	@Path("/{name}/stop")
+	@Path("/{project}/{name}/stop")
 	public String stop(@Auth UserInfo userInfo,
+					   @PathParam("project") String project,
 					   @PathParam("name") String name) {
 		log.debug("Stopping exploratory environment {} for user {}", name, userInfo.getName());
-		return exploratoryService.stop(userInfo, name);
+		return exploratoryService.stop(userInfo, project, name);
 	}
 
 	/**
@@ -124,29 +132,32 @@
 	 * @return Invocation response as JSON string.
 	 */
 	@DELETE
-	@Path("/{name}/terminate")
+	@Path("/{project}/{name}/terminate")
 	public String terminate(@Auth UserInfo userInfo,
+							@PathParam("project") String project,
 							@PathParam("name") String name) {
 		log.debug("Terminating exploratory environment {} for user {}", name, userInfo.getName());
-		return exploratoryService.terminate(userInfo, name);
+		return exploratoryService.terminate(userInfo, project, name);
 	}
 
 	@PUT
-	@Path("/{name}/reconfigure")
+	@Path("/{project}/{name}/reconfigure")
 	public Response reconfigureSpark(@Auth UserInfo userInfo,
+									 @PathParam("project") String project,
 									 @PathParam("name") String name,
 									 List<ClusterConfig> config) {
 		log.debug("Updating exploratory {} spark cluster for user {}", name, userInfo.getName());
-		exploratoryService.updateClusterConfig(userInfo, name, config);
+		exploratoryService.updateClusterConfig(userInfo, project, name, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("/{name}/cluster/config")
+	@Path("/{project}/{name}/cluster/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("project") String project,
 									 @PathParam("name") String name) {
 		log.debug("Getting exploratory {} spark cluster configuration for user {}", name, userInfo.getName());
-		return Response.ok(exploratoryService.getClusterConfig(userInfo, name)).build();
+		return Response.ok(exploratoryService.getClusterConfig(userInfo, project, name)).build();
 	}
 
 	private Exploratory getExploratory(ExploratoryCreateFormDTO formDTO) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
index a39a468..f913e2b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
@@ -30,8 +30,18 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
 import java.net.URI;
 import java.util.List;
 
@@ -58,8 +68,8 @@
 								@Valid @NotNull ExploratoryImageCreateFormDTO formDTO,
 								@Context UriInfo uriInfo) {
 		log.debug("Creating an image {} for user {}", formDTO, ui.getName());
-		String uuid = imageExploratoryService.createImage(ui, formDTO.getNotebookName(), formDTO.getName(), formDTO
-				.getDescription());
+		String uuid = imageExploratoryService.createImage(ui, formDTO.getProjectName(), formDTO.getNotebookName(),
+				formDTO.getName(), formDTO.getDescription());
 		requestId.put(ui.getName(), uuid);
 
 		final URI imageUri = UriBuilder.fromUri(uriInfo.getRequestUri())
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
index ee1ed78..c8952f3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResource.java
@@ -22,7 +22,6 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
 import com.epam.dlab.backendapi.resources.dto.ProjectInfrastructureInfo;
-import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.InfrastructureInfoService;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
@@ -71,8 +70,7 @@
 	@Path("/status")
 	public HealthStatusPageDTO status(@Auth UserInfo userInfo,
 									  @QueryParam("full") @DefaultValue("0") int fullReport) {
-		return infrastructureInfoService
-				.getHeathStatus(userInfo, fullReport != 0, UserRoles.isAdmin(userInfo));
+		return infrastructureInfoService.getHeathStatus(userInfo, fullReport != 0);
 	}
 
 	/**
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
index 64ede19..841ed73 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
@@ -41,7 +41,12 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -81,6 +86,7 @@
 	@GET
 	@Path("/lib_groups")
 	public Iterable<String> getLibGroupList(@Auth UserInfo userInfo,
+											@QueryParam("project_name") @NotBlank String projectName,
 											@QueryParam("exploratory_name") @NotBlank String exploratoryName,
 											@QueryParam("computational_name") String computationalName) {
 
@@ -88,11 +94,11 @@
 				exploratoryName, computationalName);
 		try {
 			if (StringUtils.isEmpty(computationalName)) {
-				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
 						exploratoryName);
 				return ExploratoryLibCache.getCache().getLibGroupList(userInfo, userInstance);
 			} else {
-				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
 						exploratoryName, computationalName);
 
 				userInstance.setResources(userInstance.getResources().stream()
@@ -120,13 +126,14 @@
 	@GET
 	@Path("/lib_list")
 	public List<Document> getLibList(@Auth UserInfo userInfo,
+									 @QueryParam("project_name") @NotBlank String projectName,
 									 @QueryParam("exploratory_name") @NotBlank String exploratoryName,
 									 @QueryParam("computational_name") String computationalName) {
 
 		log.debug("Loading list of libraries for user {} and exploratory {} and computational {}", userInfo.getName(),
 				exploratoryName, computationalName);
 		try {
-			return libraryService.getLibs(userInfo.getName(), exploratoryName, computationalName);
+			return libraryService.getLibs(userInfo.getName(), projectName, exploratoryName, computationalName);
 
 		} catch (Exception t) {
 			log.error("Cannot load installed libraries for user {} and exploratory {} an", userInfo.getName(),
@@ -147,14 +154,14 @@
 	 */
 	@GET
 	@Path("/lib_list/formatted")
-
 	public List<LibInfoRecord> getLibListFormatted(@Auth UserInfo userInfo,
+												   @QueryParam("project_name") @NotBlank String projectName,
 												   @QueryParam("exploratory_name") @NotBlank String exploratoryName) {
 
 		log.debug("Loading formatted list of libraries for user {} and exploratory {}", userInfo.getName(),
 				exploratoryName);
 		try {
-			return libraryService.getLibInfo(userInfo.getName(), exploratoryName);
+			return libraryService.getLibInfo(userInfo.getName(), projectName, exploratoryName);
 		} catch (Exception t) {
 			log.error("Cannot load list of libraries for user {} and exploratory {}", userInfo.getName(),
 					exploratoryName, t);
@@ -175,15 +182,16 @@
 	public Response libInstall(@Auth UserInfo userInfo,
 							   @Valid @NotNull LibInstallFormDTO formDTO) {
 		log.debug("Installing libs to environment {} for user {}", formDTO, userInfo.getName());
+		String project = formDTO.getProject();
 		final String exploratoryName = formDTO.getNotebookName();
 		final List<LibInstallDTO> libs = formDTO.getLibs();
 		final String computationalName = formDTO.getComputationalName();
 		String uuid = StringUtils.isEmpty(computationalName) ?
-				libraryService.installExploratoryLibs(userInfo, exploratoryName, libs) :
-				libraryService.installComputationalLibs(userInfo, exploratoryName, computationalName, libs);
+				libraryService.installExploratoryLibs(userInfo, project, exploratoryName, libs) :
+				libraryService.installComputationalLibs(userInfo, project, exploratoryName, computationalName, libs);
 		return Response.ok(uuid)
 				.build();
-	}
+    }
 
 	/**
 	 * Returns the list of available libraries for exploratory basing on search conditions provided in @formDTO.
@@ -203,7 +211,7 @@
 
 			if (StringUtils.isNotEmpty(formDTO.getComputationalName())) {
 
-				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
 						formDTO.getNotebookName(), formDTO.getComputationalName());
 
 				userInstance.setResources(userInstance.getResources().stream()
@@ -211,7 +219,8 @@
 						.collect(Collectors.toList()));
 
 			} else {
-				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getNotebookName());
+				userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
+						formDTO.getNotebookName());
 			}
 
 			return ExploratoryLibCache.getCache().getLibList(userInfo, userInstance, formDTO.getGroup(), formDTO
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
index a93224c..7b26d73 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
@@ -1,7 +1,11 @@
 package com.epam.dlab.backendapi.resources;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.*;
+import com.epam.dlab.backendapi.domain.CreateProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.domain.UpdateProjectBudgetDTO;
+import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.ProjectActionFormDTO;
 import com.epam.dlab.backendapi.service.AccessKeyService;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -20,7 +24,15 @@
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
@@ -58,7 +70,7 @@
 	})
 	@POST
 	@Consumes(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
+	@RolesAllowed("/api/project/create")
 	public Response createProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
 								  @Valid CreateProjectDTO projectDTO) {
 		projectService.create(userInfo, new ProjectDTO(projectDTO.getName(), projectDTO.getGroups(),
@@ -104,33 +116,12 @@
 	@RolesAllowed("/api/project")
 	public Response stopProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
 								@NotNull @Valid ProjectActionFormDTO stopProjectDTO) {
-		projectService.stop(userInfo, stopProjectDTO.getEndpoints(), stopProjectDTO.getProjectName());
+		projectService.stopWithResources(userInfo, stopProjectDTO.getEndpoints(), stopProjectDTO.getProjectName());
 		return Response
 				.accepted()
 				.build();
 	}
 
-	@Operation(summary = "Stop project on Manage environment popup", tags = "project")
-	@ApiResponses({
-			@ApiResponse(responseCode = "202", description = "Project is stopping"),
-			@ApiResponse(responseCode = "400", description = "Validation error", content = @Content(mediaType =
-					MediaType.APPLICATION_JSON,
-					schema = @Schema(implementation = ErrorDTO.class)))
-	})
-	@Path("managing/stop/{name}")
-	@POST
-	@Consumes(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
-	public Response stopProjectWithResources(@Parameter(hidden = true) @Auth UserInfo userInfo,
-											 @Parameter(description = "Project name")
-											 @PathParam("name") String name) {
-		projectService.stopWithResources(userInfo, name);
-		return Response
-				.accepted()
-				.build();
-	}
-
-
 	@Operation(summary = "Get project info", tags = "project")
 	@ApiResponses({
 			@ApiResponse(responseCode = "200", description = "Return information about project",
@@ -161,27 +152,9 @@
 	@GET
 	@Produces(MediaType.APPLICATION_JSON)
 	@RolesAllowed("/api/project")
-	public Response getProjects(@Parameter(hidden = true) @Auth UserInfo userInfo,
-								@Parameter(description = "Project name")
-								@PathParam("name") String name) {
+	public Response getProjects(@Parameter(hidden = true) @Auth UserInfo userInfo) {
 		return Response
-				.ok(projectService.getProjects())
-				.build();
-	}
-
-	@Operation(summary = "Get available projects for managing", tags = "project")
-	@ApiResponses({
-			@ApiResponse(responseCode = "200", description = "Return information about projects",
-					content = @Content(mediaType = MediaType.APPLICATION_JSON, schema =
-					@Schema(implementation = ProjectManagingDTO.class))),
-	})
-	@GET
-	@Path("managing")
-	@Produces(MediaType.APPLICATION_JSON)
-	@RolesAllowed("/api/project")
-	public Response getProjectsForManaging(@Parameter(hidden = true) @Auth UserInfo userInfo) {
-		return Response
-				.ok(projectService.getProjectsForManaging())
+				.ok(projectService.getProjects(userInfo))
 				.build();
 	}
 
@@ -214,7 +187,7 @@
 	@PUT
 	@RolesAllowed("/api/project")
 	public Response updateProject(@Parameter(hidden = true) @Auth UserInfo userInfo, UpdateProjectDTO projectDTO) {
-		projectService.update(userInfo, projectDTO);
+		projectService.update(userInfo, projectDTO, projectDTO.getName());
 		return Response.ok().build();
 	}
 
@@ -234,16 +207,6 @@
 		return Response.ok().build();
 	}
 
-	@DELETE
-	@Path("{name}")
-	@RolesAllowed("/api/project")
-	public Response removeProject(
-			@Parameter(hidden = true) @Auth UserInfo userInfo,
-			@PathParam("name") String name) {
-		projectService.terminateProject(userInfo, name);
-		return Response.ok().build();
-	}
-
 	@Operation(summary = "Updates project budget", tags = "project")
 	@ApiResponses({
 			@ApiResponse(responseCode = "200", description = "Project budget is successfully updated"),
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
index c4f9ee4..dd8f82f 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
@@ -28,7 +28,14 @@
 import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
@@ -56,12 +63,13 @@
 	 * @return response
 	 */
 	@POST
-	@Path("/{exploratoryName}")
+	@Path("/{projectName}/{exploratoryName}")
 	@Consumes(MediaType.APPLICATION_JSON)
 	public Response updateExploratoryScheduler(@Auth UserInfo userInfo,
+											   @PathParam("projectName") String projectName,
 											   @PathParam("exploratoryName") String exploratoryName,
 											   @SchedulerJobDTOValid SchedulerJobDTO dto) {
-		schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), exploratoryName, dto);
+		schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), projectName, exploratoryName, dto);
 		return Response.ok().build();
 	}
 
@@ -92,16 +100,17 @@
 	 * @return response
 	 */
 	@POST
-	@Path("/{exploratoryName}/{computationalName}")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}")
 	@Consumes(MediaType.APPLICATION_JSON)
 	public Response updateComputationalScheduler(@Auth UserInfo userInfo,
+												 @PathParam("projectName") String projectName,
 												 @PathParam("exploratoryName") String exploratoryName,
 												 @PathParam("computationalName") String computationalName,
 												 @SchedulerJobDTOValid SchedulerJobDTO dto) {
-		schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), exploratoryName,
+		schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), projectName, exploratoryName,
 				computationalName, dto);
 		return Response.ok().build();
-	}
+    }
 
 	/**
 	 * Updates computational resource <code>computationalName<code/> affiliated with exploratory
@@ -132,13 +141,14 @@
 	 * @return scheduler job data
 	 */
 	@GET
-	@Path("/{exploratoryName}")
+	@Path("/{projectName}/{exploratoryName}")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response fetchSchedulerJobForUserAndExploratory(@Auth UserInfo userInfo,
+														   @PathParam("projectName") String projectName,
 														   @PathParam("exploratoryName") String exploratoryName) {
 		log.debug("Loading scheduler job for user {} and exploratory {}...", userInfo.getName(), exploratoryName);
 		final SchedulerJobDTO schedulerJob =
-				schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), exploratoryName);
+				schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), projectName, exploratoryName);
 		return Response.ok(schedulerJob).build();
 	}
 
@@ -152,15 +162,16 @@
 	 * @return scheduler job data
 	 */
 	@GET
-	@Path("/{exploratoryName}/{computationalName}")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}")
 	@Produces(MediaType.APPLICATION_JSON)
 	public Response fetchSchedulerJobForComputationalResource(@Auth UserInfo userInfo,
 															  @PathParam("exploratoryName") String exploratoryName,
+															  @PathParam("projectName") String projectName,
 															  @PathParam("computationalName") String computationalName) {
 		log.debug("Loading scheduler job for user {}, exploratory {} and computational resource {}...",
 				userInfo.getName(), exploratoryName, computationalName);
 		final SchedulerJobDTO schedulerJob = schedulerJobService
-				.fetchSchedulerJobForComputationalResource(userInfo.getName(), exploratoryName, computationalName);
+				.fetchSchedulerJobForComputationalResource(userInfo.getName(), projectName, exploratoryName, computationalName);
 		return Response.ok(schedulerJob).build();
 	}
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
index 154ddc2..8cd3381 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserGroupResource.java
@@ -20,24 +20,26 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.GroupDTO;
-import com.epam.dlab.backendapi.resources.dto.UpdateRoleGroupDto;
-import com.epam.dlab.backendapi.resources.dto.UpdateUserGroupDto;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
-import org.hibernate.validator.constraints.NotEmpty;
 
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import java.util.Set;
 
 @Slf4j
 @Path("group")
-@RolesAllowed("/roleManagement")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
 public class UserGroupResource {
@@ -51,71 +53,34 @@
 
 
 	@POST
-	public Response createGroup(@Auth UserInfo userInfo,
-								@Valid GroupDTO dto) {
+	@RolesAllowed("/roleManagement/create")
+	public Response createGroup(@Auth UserInfo userInfo, @Valid GroupDTO dto) {
 		log.debug("Creating new group {}", dto.getName());
 		userGroupService.createGroup(dto.getName(), dto.getRoleIds(), dto.getUsers());
 		return Response.ok().build();
 	}
 
 	@PUT
+	@RolesAllowed("/roleManagement")
 	public Response updateGroup(@Auth UserInfo userInfo, @Valid GroupDTO dto) {
 		log.debug("Updating group {}", dto.getName());
-		userGroupService.updateGroup(dto.getName(), dto.getRoleIds(), dto.getUsers());
+		userGroupService.updateGroup(userInfo, dto.getName(), dto.getRoleIds(), dto.getUsers());
 		return Response.ok().build();
 	}
 
 	@GET
+	@RolesAllowed("/roleManagement")
 	public Response getGroups(@Auth UserInfo userInfo) {
 		log.debug("Getting all groups for admin {}...", userInfo.getName());
-		return Response.ok(userGroupService.getAggregatedRolesByGroup()).build();
-	}
-
-	@PUT
-	@Path("role")
-	public Response updateRolesForGroup(@Auth UserInfo userInfo, @Valid UpdateRoleGroupDto updateRoleGroupDto) {
-		log.info("Admin {} is trying to add new group {} to roles {}", userInfo.getName(),
-				updateRoleGroupDto.getGroup(), updateRoleGroupDto.getRoleIds());
-		userGroupService.updateRolesForGroup(updateRoleGroupDto.getGroup(), updateRoleGroupDto.getRoleIds());
-		return Response.ok().build();
-	}
-
-	@DELETE
-	@Path("role")
-	public Response deleteGroupFromRole(@Auth UserInfo userInfo,
-										@QueryParam("group") @NotEmpty Set<String> groups,
-										@QueryParam("roleId") @NotEmpty Set<String> roleIds) {
-		log.info("Admin {} is trying to delete groups {} from roles {}", userInfo.getName(), groups, roleIds);
-		userGroupService.removeGroupFromRole(groups, roleIds);
-		return Response.ok().build();
+		return Response.ok(userGroupService.getAggregatedRolesByGroup(userInfo)).build();
 	}
 
 	@DELETE
 	@Path("{id}")
-	public Response deleteGroup(@Auth UserInfo userInfo,
-								@PathParam("id") String group) {
+	@RolesAllowed("/roleManagement/delete")
+	public Response deleteGroup(@Auth UserInfo userInfo, @PathParam("id") String group) {
 		log.info("Admin {} is trying to delete group {} from application", userInfo.getName(), group);
 		userGroupService.removeGroup(group);
 		return Response.ok().build();
 	}
-
-	@PUT
-	@Path("user")
-	public Response addUserToGroup(@Auth UserInfo userInfo,
-								   @Valid UpdateUserGroupDto updateUserGroupDto) {
-		log.info("Admin {} is trying to add new users {} to group {}", userInfo.getName(),
-				updateUserGroupDto.getUsers(), updateUserGroupDto.getGroup());
-		userGroupService.addUsersToGroup(updateUserGroupDto.getGroup(), updateUserGroupDto.getUsers());
-		return Response.ok().build();
-	}
-
-	@DELETE
-	@Path("user")
-	public Response deleteUserFromGroup(@Auth UserInfo userInfo,
-										@QueryParam("user") @NotEmpty String user,
-										@QueryParam("group") @NotEmpty String group) {
-		log.info("Admin {} is trying to delete user {} from group {}", userInfo.getName(), user, group);
-		userGroupService.removeUserFromGroup(group, user);
-		return Response.ok().build();
-	}
 }
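The method-level annotations above split one blanket "/roleManagement" permission into separate create/update/delete grants. In Dropwizard, each @RolesAllowed value is passed verbatim to the registered Authorizer, so the split works with an authorizer along these lines (a sketch, not DLab's actual implementation):

import com.epam.dlab.auth.UserInfo;
import io.dropwizard.auth.Authorizer;

public class PermissionAuthorizerSketch implements Authorizer<UserInfo> {
    @Override
    public boolean authorize(UserInfo principal, String permission) {
        // "permission" arrives as the literal annotation value, e.g. "/roleManagement/create",
        // so group creation can be granted independently of update or delete.
        return principal.getRoles().contains(permission);
    }
}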
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
index b9d0619..b74ef1c 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/UserRoleResource.java
@@ -26,7 +26,11 @@
 import lombok.extern.slf4j.Slf4j;
 
 import javax.annotation.security.RolesAllowed;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
@@ -47,7 +51,7 @@
 	@GET
 	public Response getRoles(@Auth UserInfo userInfo) {
 		log.debug("Getting all roles for admin {}...", userInfo.getName());
-		return Response.ok(userRoleService.getUserRoles()).build();
+		return Response.ok(userRoleService.getUserRoles(userInfo)).build();
 	}
 
 	@POST
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
index d2ed15d..87f99bd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
@@ -34,13 +34,18 @@
 import com.epam.dlab.rest.contracts.ComputationalAPI;
 import com.google.inject.Inject;
 import io.dropwizard.auth.Auth;
-import io.swagger.v3.oas.annotations.Operation;
 import io.swagger.v3.oas.annotations.Parameter;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -57,12 +62,17 @@
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceAws implements ComputationalAPI {
-
 	@Inject
 	private SelfServiceApplicationConfiguration configuration;
 	@Inject
 	private ComputationalService computationalService;
 
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
 
 	/**
 	 * Asynchronously creates EMR cluster
@@ -133,13 +143,14 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -155,14 +166,15 @@
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
 	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
 
 	/**
 	 * Sends request to provisioning service for starting the computational resource for user.
@@ -186,22 +198,24 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 
 	private void validate(SparkStandaloneClusterCreateForm form) {
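The new GET /{project}/{endpoint}/templates route simply delegates to ComputationalService, which makes it straightforward to cover with a mock. A test-style sketch (Mockito usage and the empty-list payload are illustrative assumptions, not code from this change):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
import com.epam.dlab.backendapi.service.ComputationalService;
import java.util.Collections;

public class TemplatesEndpointSketch {
    public static void main(String[] args) {
        ComputationalService service = mock(ComputationalService.class);
        UserInfo user = new UserInfo("test-user", "test-token"); // token value is a placeholder
        // The DTO's three final fields get a Lombok-generated required-args constructor.
        when(service.getComputationalNamesAndTemplates(user, "demo-project", "demo-endpoint"))
                .thenReturn(new ComputationalTemplatesDTO(
                        Collections.emptyList(), Collections.emptyList(), Collections.emptyList()));
        System.out.println(service.getComputationalNamesAndTemplates(user, "demo-project", "demo-endpoint"));
    }
}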
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
index 1ba09d8..29f9794 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
@@ -21,26 +21,27 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.auth.rest.UserSessionDurationAuthorizer;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.ComputationalDAO;
-import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
 import com.epam.dlab.backendapi.roles.RoleType;
 import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.constants.ServiceConsts;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
 import com.google.inject.Inject;
-import com.google.inject.name.Named;
 import io.dropwizard.auth.Auth;
+import io.swagger.v3.oas.annotations.Parameter;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.annotation.security.RolesAllowed;
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -53,22 +54,19 @@
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceAzure {
+	private final ComputationalService computationalService;
 
 	@Inject
-	private ExploratoryDAO exploratoryDAO;
+	public ComputationalResourceAzure(ComputationalService computationalService) {
+		this.computationalService = computationalService;
+	}
 
-	@Inject
-	private ComputationalDAO computationalDAO;
-
-	@Inject
-	@Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-
-	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-
-	@Inject
-	private ComputationalService computationalService;
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
 
 	/**
 	 * Asynchronously creates computational Spark cluster.
@@ -105,14 +103,15 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -127,12 +126,13 @@
 	 */
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
-	public Response stop( @Auth UserInfo userInfo,
+	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -159,21 +159,23 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 }
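Switching ComputationalResourceAzure from five injected fields to a single constructor-injected dependency makes the surviving dependency explicit and the class trivially constructible in tests, with no Guice injector in the loop. For instance (the mock is illustrative):

import static org.mockito.Mockito.mock;

import com.epam.dlab.backendapi.resources.azure.ComputationalResourceAzure;
import com.epam.dlab.backendapi.service.ComputationalService;

public class AzureResourceWiringSketch {
    public static void main(String[] args) {
        // Guice injects through the @Inject constructor in production; tests can
        // simply call it directly with a stub.
        ComputationalService service = mock(ComputationalService.class);
        ComputationalResourceAzure resource = new ComputationalResourceAzure(service);
        System.out.println("constructed: " + resource);
    }
}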
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
index 111bcfa..abf4c6d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
@@ -58,9 +58,8 @@
 	@Path("computational")
 	public Response updateComputationalLastActivity(CheckInactivityStatusDTO dto) {
 		requestId.checkAndRemove(dto.getRequestId());
-		inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null),
-				dto.getExploratoryName(),
-				dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
+		inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null), null,
+				dto.getExploratoryName(), dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
 		return Response.ok().build();
 	}
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
index 04ca8fb..2b286b5 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
@@ -19,7 +19,6 @@
 
 package com.epam.dlab.backendapi.resources.callback;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ComputationalService;
@@ -29,7 +28,6 @@
 import com.epam.dlab.dto.computational.ComputationalStatusDTO;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.rest.contracts.ApiCallbacks;
 import com.google.inject.Inject;
 import lombok.extern.slf4j.Slf4j;
@@ -42,8 +40,6 @@
 import javax.ws.rs.core.Response;
 import java.util.Date;
 
-import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-
 @Path("/infrastructure_provision/computational_resources")
 @Consumes(MediaType.APPLICATION_JSON)
 @Produces(MediaType.APPLICATION_JSON)
@@ -75,11 +71,12 @@
 		String uuid = dto.getRequestId();
 		requestId.checkAndRemove(uuid);
 
-		UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(),
-				dto.getExploratoryName(), dto.getComputationalName()).orElseThrow(() ->
-				new DlabException("Computational resource " + dto.getComputationalName() +
-						" of exploratory environment " + dto.getExploratoryName() + " for user " + dto.getUser() +
-						" doesn't exist"));
+		UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(), dto.getProject(),
+				dto.getExploratoryName(), dto.getComputationalName())
+				.orElseThrow(() ->
+						new DlabException(String.format("Computational resource %s of exploratory environment %s of " +
+										"project %s for user %s doesn't exist", dto.getComputationalName(),
+								dto.getExploratoryName(), dto.getProject(), dto.getUser())));
 		log.debug("Current status for computational resource {} of exploratory environment {} for user {} is {}",
 				dto.getComputationalName(), dto.getExploratoryName(), dto.getUser(),
 				compResource.getStatus());
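The rewritten lookup above replaces string concatenation with Optional.orElseThrow plus String.format, which keeps a four-field error message readable. The same pattern in isolation (the exception type and names here are illustrative, not DLab's):

import java.util.Optional;

public class RequireSketch {
    static <T> T require(Optional<T> value, String name, String project, String user) {
        // The supplier runs only when the Optional is empty, so the formatted
        // message is built lazily.
        return value.orElseThrow(() -> new IllegalStateException(
                String.format("Computational resource %s of project %s for user %s doesn't exist",
                        name, project, user)));
    }

    public static void main(String[] args) {
        System.out.println(require(Optional.of("emr-1"), "emr-1", "demo", "alice"));
    }
}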
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
index 618fb04..d4c059e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
@@ -20,14 +20,12 @@
 package com.epam.dlab.backendapi.resources.callback;
 
 import com.epam.dlab.backendapi.dao.EnvDAO;
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.status.EnvStatusDTO;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.rest.contracts.ApiCallbacks;
 import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
 import lombok.extern.slf4j.Slf4j;
 
 import javax.ws.rs.Consumes;
@@ -63,7 +61,7 @@
             if (UserInstanceStatus.FAILED == UserInstanceStatus.of(dto.getStatus())) {
                 log.warn("Request for the status of resources for user {} fails: {}", dto.getUser(), dto.getErrorMessage());
             } else {
-                envDAO.updateEnvStatus(dto.getUser(), dto.getResourceList());
+                envDAO.updateEnvStatus(dto.getUser(), null, dto.getResourceList());
             }
         } catch (DlabException e) {
             log.warn("Could not update status of resources for user {}: {}", dto.getUser(), e.getLocalizedMessage(), e);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
index 8e11c0b..c275a18 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
@@ -41,7 +41,11 @@
 import javax.ws.rs.core.Response;
 import java.util.Date;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 
 
 @Path("/infrastructure_provision/exploratory_environment")
@@ -78,7 +82,7 @@
 				dto.getExploratoryName(), dto.getUser(), dto.getStatus());
 		requestId.checkAndRemove(dto.getRequestId());
 
-		UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getExploratoryName())
+		UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getProject(), dto.getExploratoryName())
 				.orElseThrow(() -> new DlabException(String.format(USER_INSTANCE_NOT_EXIST_MSG,
 						dto.getExploratoryName(), dto.getUser())));
 
@@ -89,15 +93,15 @@
 		try {
 			exploratoryDAO.updateExploratoryFields(dto.withLastActivity(new Date()));
 			if (currentStatus == TERMINATING) {
-				updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+				updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
 						UserInstanceStatus.of(dto.getStatus()));
 			} else if (currentStatus == STOPPING) {
-				updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+				updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
 						UserInstanceStatus.of(dto.getStatus()), TERMINATED, FAILED, TERMINATED, STOPPED);
 			}
 		} catch (DlabException e) {
-			log.error("Could not update status for exploratory environment {} for user {} to {}",
-					dto.getExploratoryName(), dto.getUser(), dto.getStatus(), e);
+			log.error("Could not update status for exploratory environment {} in project {} for user {} to {}",
+					dto.getExploratoryName(), dto.getProject(), dto.getUser(), dto.getStatus(), e);
 			throw new DlabException("Could not update status for exploratory environment " + dto.getExploratoryName() +
 					" for user " + dto.getUser() + " to " + dto.getStatus() + ": " + e.getLocalizedMessage(), e);
 		}
@@ -109,23 +113,25 @@
 	 * Updates the computational status of exploratory environment.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus status) {
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		log.debug("updating status for all computational resources of {} for user {}: {}", exploratoryName, user,
 				status);
 		computationalDAO.updateComputationalStatusesForExploratory(new ExploratoryStatusDTO()
 				.withUser(user)
 				.withExploratoryName(exploratoryName)
+				.withProject(project)
 				.withStatus(status));
 	}
 
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
 			dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
 		log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
 				"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
-		computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
-				dataEngineServiceStatus, excludedStatuses);
+		computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+				dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
 	}
 }
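For the STOPPING branch above, the varargs overload reads as: set Spark clusters (DataEngine) to the notebook's new status, send dataengine-services to TERMINATED, and skip resources already in FAILED, TERMINATED, or STOPPED. A standalone sketch mirroring the private helper's parameter order, purely for illustration:

import com.epam.dlab.dto.UserInstanceStatus;
import java.util.Arrays;

public class StatusFanoutSketch {
    static void updateComputationalStatuses(String user, String project, String exploratory,
                                            UserInstanceStatus dataEngine,
                                            UserInstanceStatus dataEngineService,
                                            UserInstanceStatus... excluded) {
        System.out.printf("%s/%s/%s: spark -> %s, dataengine-service -> %s, skipping %s%n",
                user, project, exploratory, dataEngine, dataEngineService, Arrays.toString(excluded));
    }

    public static void main(String[] args) {
        updateComputationalStatuses("alice", "demo", "notebook-1",
                UserInstanceStatus.STOPPED, UserInstanceStatus.TERMINATED,
                UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
    }
}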
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
new file mode 100644
index 0000000..9871918
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.resources.dto;
+
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+
+import java.util.List;
+
+@Data
+public class ComputationalTemplatesDTO {
+    private final List<FullComputationalTemplate> templates;
+    @JsonProperty("user_computations")
+    private final List<String> userComputations;
+    @JsonProperty("project_computations")
+    private final List<String> projectComputations;
+}
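Because all three fields of the new DTO are final, Lombok's @Data generates a required-args constructor, and the two @JsonProperty annotations control the wire names. A serialization sketch (field values are illustrative):

import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collections;

public class TemplatesDtoJsonSketch {
    public static void main(String[] args) throws Exception {
        ComputationalTemplatesDTO dto = new ComputationalTemplatesDTO(
                Collections.emptyList(),
                Collections.singletonList("spark-cluster-1"),
                Collections.singletonList("des-cluster-2"));
        // Expected shape: {"templates":[],"user_computations":[...],"project_computations":[...]}
        System.out.println(new ObjectMapper().writeValueAsString(dto));
    }
}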
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
index 9c3eb30..14193f2 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
@@ -27,11 +27,13 @@
 @Data
 @ToString
 public class ExploratoryImageCreateFormDTO {
-
-	@NotBlank
-	@JsonProperty("exploratory_name")
-	private String notebookName;
-	@NotBlank
-	private final String name;
-	private final String description;
+    @NotBlank
+    private final String name;
+    @NotBlank
+    @JsonProperty("exploratory_name")
+    private String notebookName;
+    @NotBlank
+    @JsonProperty("project_name")
+    private String projectName;
+    private final String description;
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
index b7f9362..17e7b91 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/HealthStatusPageDTO.java
@@ -20,13 +20,16 @@
 package com.epam.dlab.backendapi.resources.dto;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
+import lombok.Builder;
+import lombok.Data;
 
 import java.util.List;
 
 /**
  * Stores the health statuses for environment resources.
  */
+@Data
+@Builder
 public class HealthStatusPageDTO {
 	@JsonProperty
 	private String status;
@@ -37,131 +40,11 @@
 	@JsonProperty
 	private boolean admin;
 	@JsonProperty
+	private boolean projectAdmin;
+	@JsonProperty
 	private int billingQuoteUsed;
 	@JsonProperty
 	private int billingUserQuoteUsed;
 	@JsonProperty
 	private boolean projectAssigned;
-
-	/**
-	 * Return the status of environment.
-	 */
-	public String getStatus() {
-		return status;
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public void setStatus(HealthStatusEnum status) {
-		this.status = status == null ? null : status.toString();
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public void setStatus(String status) {
-		this.status = status;
-	}
-
-	public void setBillingEnabled(boolean billingEnabled) {
-		this.billingEnabled = billingEnabled;
-	}
-
-
-	/**
-	 * Set the status of environment.
-	 */
-	public HealthStatusPageDTO withStatus(String status) {
-		setStatus(status);
-		return this;
-	}
-
-	/**
-	 * Set the status of environment.
-	 */
-	public HealthStatusPageDTO withStatus(HealthStatusEnum status) {
-		setStatus(status);
-		return this;
-	}
-
-	public HealthStatusPageDTO withProjectAssinged(boolean isProjectAssigned) {
-		this.projectAssigned = isProjectAssigned;
-		return this;
-	}
-
-	/**
-	 * Return the list of resources.
-	 */
-	public List<HealthStatusResource> getListResources() {
-		return listResources;
-	}
-
-	/**
-	 * Set the list of resources.
-	 */
-	public void setListResources(List<HealthStatusResource> listResources) {
-		this.listResources = listResources;
-	}
-
-	/**
-	 * Set the list of resources.
-	 */
-	public HealthStatusPageDTO withListResources(List<HealthStatusResource> listResources) {
-		setListResources(listResources);
-		return this;
-	}
-
-	/**
-	 * Set billing enabled flag
-	 */
-	public HealthStatusPageDTO withBillingEnabled(boolean billingEnabled) {
-		setBillingEnabled(billingEnabled);
-		return this;
-	}
-
-	@Override
-	public String toString() {
-		return MoreObjects.toStringHelper(this)
-				.add("status", status)
-				.add("listResources", listResources)
-				.add("billingEnabled", billingEnabled)
-				.add("admin", admin)
-				.toString();
-	}
-
-	public HealthStatusPageDTO withAdmin(boolean isAdmin) {
-		this.admin = isAdmin;
-		return this;
-	}
-
-	public HealthStatusPageDTO withBillingQuoteUsed(int billingQuoteUsedPct) {
-		this.billingQuoteUsed = billingQuoteUsedPct;
-		return this;
-	}
-
-	public HealthStatusPageDTO withBillingUserQuoteUsed(int billingUserQuoteUsed) {
-		this.billingUserQuoteUsed = billingUserQuoteUsed;
-		return this;
-	}
-
-	public boolean isBillingEnabled() {
-		return billingEnabled;
-	}
-
-	public boolean isAdmin() {
-		return admin;
-	}
-
-	public boolean isProjectAssigned() {
-		return projectAssigned;
-	}
-
-	public int getBillingQuoteUsed() {
-		return billingQuoteUsed;
-	}
-
-	public int getBillingUserQuoteUsed() {
-		return billingUserQuoteUsed;
-	}
 }
\ No newline at end of file
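With @Data/@Builder replacing roughly 120 lines of hand-written accessors and withX() chaining, construction moves to the generated builder. A sketch with illustrative values:

import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
import java.util.Collections;

public class HealthStatusBuilderSketch {
    public static void main(String[] args) {
        HealthStatusPageDTO page = HealthStatusPageDTO.builder()
                .status("ok")
                .listResources(Collections.emptyList())
                .billingEnabled(true)
                .admin(false)
                .projectAdmin(true) // new flag introduced in this change
                .billingQuoteUsed(40)
                .billingUserQuoteUsed(15)
                .projectAssigned(true)
                .build();
        System.out.println(page); // @Data also supplies toString(), replacing the MoreObjects helper
    }
}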
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
index 1a3b8a8..c2b8d1a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
@@ -39,6 +39,9 @@
     @JsonProperty("computational_name")
     private String computationalName;
 
+    @JsonProperty("project_name")
+    private String project;
+
     @NotEmpty
     @JsonProperty
     private List<LibInstallDTO> libs;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
index f817c4e..ff6edb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
@@ -30,6 +30,10 @@
     private String notebookName;
 
     @NotBlank
+    @JsonProperty("project_name")
+    private String projectName;
+
+    @NotBlank
     @JsonProperty
     private String group;
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
index 21ce26d..5c90602 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
@@ -18,6 +18,7 @@
  */
 package com.epam.dlab.backendapi.resources.dto;
 
+import com.epam.dlab.cloud.CloudProvider;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import lombok.Getter;
@@ -31,10 +32,11 @@
 @ToString
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class UserRoleDto {
-
 	@JsonProperty("_id")
 	private String id;
 	private String description;
+	private Type type;
+	private CloudProvider cloud;
 	private Set<String> pages;
 	private Set<String> computationals;
 	private Set<String> exploratories;
@@ -42,4 +44,12 @@
 	private Set<String> exploratoryShapes;
 	private Set<String> groups;
 
+	private enum Type {
+		NOTEBOOK,
+		COMPUTATIONAL,
+		NOTEBOOK_SHAPE,
+		COMPUTATIONAL_SHAPE,
+		BILLING,
+		ADMINISTRATION
+	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
index 43fca4a..087330a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
@@ -39,7 +39,13 @@
 
 import javax.validation.Valid;
 import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.List;
@@ -55,13 +61,23 @@
 @Produces(MediaType.APPLICATION_JSON)
 @Slf4j
 public class ComputationalResourceGcp implements ComputationalAPI {
+	private final SelfServiceApplicationConfiguration configuration;
+	private final ComputationalService computationalService;
 
 	@Inject
-	private SelfServiceApplicationConfiguration configuration;
-	@Inject
-	private ComputationalService computationalService;
+	public ComputationalResourceGcp(SelfServiceApplicationConfiguration configuration, ComputationalService computationalService) {
+		this.configuration = configuration;
+		this.computationalService = computationalService;
+	}
 
 
+	@GET
+	@Path("/{project}/{endpoint}/templates")
+	public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+								 @PathParam("endpoint") String endpoint) {
+		return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+	}
+
 	/**
 	 * Asynchronously creates Dataproc cluster
 	 *
@@ -134,13 +150,14 @@
 	 * @return 200 OK if operation is successfully triggered
 	 */
 	@DELETE
-	@Path("/{exploratoryName}/{computationalName}/terminate")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
 	public Response terminate(@Auth UserInfo userInfo,
+							  @PathParam("projectName") String projectName,
 							  @PathParam("exploratoryName") String exploratoryName,
 							  @PathParam("computationalName") String computationalName) {
 		log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+		computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
@@ -156,14 +173,15 @@
 	@DELETE
 	@Path("/{project}/{exploratoryName}/{computationalName}/stop")
 	public Response stop(@Auth UserInfo userInfo,
+						 @PathParam("project") String project,
 						 @PathParam("exploratoryName") String exploratoryName,
 						 @PathParam("computationalName") String computationalName) {
 		log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
 
-		computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+		computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
 
 		return Response.ok().build();
 	}
 
 	/**
 	 * Sends request to provisioning service for starting the computational resource for user.
@@ -187,22 +205,24 @@
 	}
 
 	@PUT
-	@Path("dataengine/{exploratoryName}/{computationalName}/config")
+	@Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+										   @PathParam("projectName") String projectName,
 										   @PathParam("exploratoryName") String exploratoryName,
 										   @PathParam("computationalName") String computationalName,
 										   @Valid @NotNull List<ClusterConfig> config) {
 
-		computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+		computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
 		return Response.ok().build();
 	}
 
 	@GET
-	@Path("{exploratoryName}/{computationalName}/config")
+	@Path("/{projectName}/{exploratoryName}/{computationalName}/config")
 	public Response getClusterConfig(@Auth UserInfo userInfo,
+									 @PathParam("projectName") String projectName,
 									 @PathParam("exploratoryName") String exploratoryName,
 									 @PathParam("computationalName") String computationalName) {
-		return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+		return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
 	}
 
 	private void validate(@Auth UserInfo userInfo, GcpComputationalCreateForm formDTO) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
index 206b143..e5343dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRole.java
@@ -23,6 +23,7 @@
 import com.google.common.base.MoreObjects.ToStringHelper;
 
 import javax.annotation.Nonnull;
+import java.util.Comparator;
 import java.util.Objects;
 import java.util.Set;
 
@@ -69,10 +70,6 @@
 		this.users = users;
 	}
 
-	UserRole(RoleType type, String name, Set<String> groups, Set<String> users) {
-		this(null, type, name, groups, users);
-	}
-
 	/**
 	 * Return the type of role.
 	 */
@@ -107,8 +104,10 @@
 
 	@Override
 	public int compareTo(@Nonnull UserRole o) {
-		int result = type.compareTo(o.type);
-		return (result == 0 ? name.compareTo(o.name) : result);
+		return Comparator.comparing(UserRole::getType)
+				.thenComparing(UserRole::getName)
+				.thenComparing(UserRole::getId, Comparator.nullsLast(String::compareToIgnoreCase))
+				.compare(this, o);
 	}
 
 	private ToStringHelper toStringHelper(Object self) {
@@ -124,7 +123,7 @@
 		if (this == o) return true;
 		if (o == null || getClass() != o.getClass()) return false;
 		UserRole userRole = (UserRole) o;
-		return this.type.equals(userRole.getType()) && this.name.equals(userRole.getName());
+		return Objects.equals(this.id, userRole.getId()) && this.type.equals(userRole.getType()) && this.name.equals(userRole.getName());
 	}
 
 	@Override
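The rewritten compareTo is a standard three-key Comparator chain; nullsLast keeps roles with a null id sortable instead of throwing, and with id now part of equals as well, ordering and equality use the same keys. The null-handling behavior in isolation, on plain strings:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class NullsLastSketch {
    public static void main(String[] args) {
        // Same idea as the id key above: null values sort after non-null ones
        // rather than triggering a NullPointerException.
        List<String> ids = Arrays.asList("user", null, "admin");
        ids.sort(Comparator.nullsLast(String::compareToIgnoreCase));
        System.out.println(ids); // [admin, user, null]
    }
}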
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
index 411f798..621f2dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/roles/UserRoles.java
@@ -28,7 +28,15 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * Provides user roles access to features.
@@ -45,6 +53,7 @@
 	 * Node name of user.
 	 */
 	private static final String USERS = "users";
+	private static final String PROJECT_ADMIN_ROLE_NAME = "projectAdmin";
 	private static final String ADMIN_ROLE_NAME = "admin";
 	/**
 	 * Single instance of the user roles.
@@ -95,10 +104,22 @@
 		return checkAccess(userInfo, type, name, true, roles);
 	}
 
+	public static boolean isProjectAdmin(UserInfo userInfo) {
+		final List<UserRole> roles = UserRoles.getRoles();
+		return roles == null || roles.stream().anyMatch(r -> PROJECT_ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), r.getGroups()) || userRoles.hasAccessByUserName(userInfo, r)));
+	}
+
+	public static boolean isProjectAdmin(UserInfo userInfo, Set<String> groups) {
+		final List<UserRole> roles = UserRoles.getRoles();
+		return roles == null || roles.stream().anyMatch(r -> PROJECT_ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), retainGroups(r.getGroups(), groups)) || userRoles.hasAccessByUserName(userInfo, r)));
+	}
+
 	public static boolean isAdmin(UserInfo userInfo) {
 		final List<UserRole> roles = UserRoles.getRoles();
 		return roles == null || roles.stream().anyMatch(r -> ADMIN_ROLE_NAME.equalsIgnoreCase(r.getId()) &&
-				(userRoles.hasAccessByGroup(userInfo, r, userInfo.getRoles()) || userRoles.hasAccessByUserName(userInfo, r)));
+				(userRoles.hasAccessByGroup(userInfo, userInfo.getRoles(), r.getGroups()) || userRoles.hasAccessByUserName(userInfo, r)));
 	}
 
 	/**
@@ -181,12 +202,16 @@
 	 *
 	 * @param type type of role.
 	 * @param name the name of role.
+	 * @return set of group names for the roles matching the given type and name
 	 */
-	private UserRole get(RoleType type, String name) {
-		UserRole item = new UserRole(type, name, null, null);
+	private Set<String> getGroups(RoleType type, String name) {
 		synchronized (roles) {
-			int i = Collections.binarySearch(roles, item);
-			return (i < 0 ? null : roles.get(i));
+			return roles
+					.stream()
+					.filter(r -> type == r.getType() && name.equalsIgnoreCase(r.getName()))
+					.map(UserRole::getGroups)
+					.flatMap(Collection::stream)
+					.collect(Collectors.toSet());
 		}
 	}
 
@@ -233,17 +258,18 @@
 		}
 		LOGGER.trace("Check access for user {} with groups {} to {}/{}", userInfo.getName(), userInfo.getRoles(),
 				type, name);
-		UserRole role = get(type, name);
-		if (role == null) {
+		Set<String> groups = getGroups(type, name);
+		if (groups == null || groups.isEmpty()) {
 			return checkDefault(useDefault);
 		}
-		if (hasAccessByGroup(userInfo, role, roles)) return true;
+		if (hasAccessByGroup(userInfo, roles, groups)) {
+			return true;
+		}
 		LOGGER.trace("Access denied for user {} to {}/{}", userInfo.getName(), type, name);
 		return false;
 	}
 
-	private boolean hasAccessByGroup(UserInfo userInfo, UserRole role, Collection<String> userRoles) {
-		Set<String> groups = role.getGroups();
+	private boolean hasAccessByGroup(UserInfo userInfo, Collection<String> userRoles, Collection<String> groups) {
 		if (groups != null) {
 			if (groups.contains(ANY_USER)) {
 				return true;
@@ -255,7 +281,7 @@
 				}
 			}
 
-			final Optional<String> group = role.getGroups()
+			final Optional<String> group = groups
 					.stream()
 					.filter(g -> userGroups.getOrDefault(g, Collections.emptySet()).contains(userInfo.getName().toLowerCase()))
 					.findAny();
@@ -287,12 +313,16 @@
 		}
 	}
 
+	private static Set<String> retainGroups(Set<String> groups1, Set<String> groups2) {
+		HashSet<String> result = new HashSet<>(groups1);
+		result.retainAll(groups2);
+		return result;
+	}
+
 	@Override
 	public String toString() {
 		return MoreObjects.toStringHelper(roles)
 				.addValue(roles)
 				.toString();
 	}
-
-
 }
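retainGroups is a plain set intersection; isProjectAdmin(userInfo, groups) uses it so that only projectAdmin groups that also belong to the project at hand count toward access. Standalone, with illustrative group names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RetainGroupsSketch {
    public static void main(String[] args) {
        Set<String> roleGroups = new HashSet<>(Arrays.asList("dev", "ops", "project-admins"));
        Set<String> projectGroups = new HashSet<>(Arrays.asList("dev", "qa"));
        // retainAll mutates the copy, leaving only groups present in both sets.
        Set<String> intersection = new HashSet<>(roleGroups);
        intersection.retainAll(projectGroups);
        System.out.println(intersection); // [dev]
    }
}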
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
index c16bd10..40a5c14 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingService.java
@@ -20,82 +20,20 @@
 package com.epam.dlab.backendapi.service;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.dao.BillingDAO;
+import com.epam.dlab.backendapi.domain.BillingReport;
+import com.epam.dlab.backendapi.domain.BillingReportLine;
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.exceptions.DlabException;
-import com.google.inject.Inject;
-import jersey.repackaged.com.google.common.collect.Lists;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.billing.BillingData;
 
-import java.text.ParseException;
 import java.util.List;
 
-@Slf4j
-public abstract class BillingService {
+public interface BillingService {
+    BillingReport getBillingReport(UserInfo userInfo, BillingFilter filter);
 
-    @Inject
-    private BillingDAO billingDAO;
+    String downloadReport(UserInfo userInfo, BillingFilter filter);
 
-    public Document getReport(UserInfo userInfo, BillingFilter filter) {
-        log.trace("Get billing report for user {} with filter {}", userInfo.getName(), filter);
-        try {
-            return billingDAO.getReport(userInfo, filter);
-        } catch (RuntimeException t) {
-            log.error("Cannot load billing report for user {} with filter {}", userInfo.getName(), filter, t);
-            throw new DlabException("Cannot load billing report: " + t.getLocalizedMessage(), t);
-        }
-    }
+    List<BillingReportLine> getBillingReportLines(UserInfo userInfo, BillingFilter filter);
 
-    protected String getValueOrEmpty(Document document, String key) {
-        String value = document.getString(key);
-        return value == null ? "" : value;
-    }
-
-    String getHeaders(boolean full) {
-        return CSVFormatter.formatLine(getHeadersList(full), CSVFormatter.SEPARATOR);
-    }
-
-    public Document getBillingReport(UserInfo userInfo, BillingFilter filter) {
-        filter.getUsers().replaceAll(s -> s.equalsIgnoreCase(BaseBillingDAO.SHARED_RESOURCE_NAME) ? null : s);
-        return getReport(userInfo, filter);
-    }
-
-    public byte[] downloadReport(UserInfo userInfo, BillingFilter filter) {
-        return prepareReport(getReport(userInfo, filter)).getBytes();
-    }
-
-    String prepareReport(Document document) {
-        try {
-            StringBuilder builder =
-                    new StringBuilder(CSVFormatter.formatLine(Lists.newArrayList(getFirstLine(document)),
-                            CSVFormatter.SEPARATOR, '\"'));
-
-            Boolean full = (Boolean) document.get(BaseBillingDAO.FULL_REPORT);
-            builder.append(getHeaders(full));
-
-            @SuppressWarnings("unchecked")
-            List<Document> items = (List<Document>) document.get(BaseBillingDAO.ITEMS);
-
-            items.forEach(d -> builder.append(getLine(full, d)));
-
-            builder.append(getTotal(full, document));
-
-            return builder.toString();
-        } catch (ParseException e) {
-            throw new DlabException("Cannot prepare CSV file", e);
-        }
-    }
-
-    public abstract String getFirstLine(Document document) throws ParseException;
-
-    public abstract List<String> getHeadersList(boolean full);
-
-    public abstract String getLine(boolean full, Document document);
-
-    public abstract String getTotal(boolean full, Document document);
-
-    public abstract String getReportFileName(UserInfo userInfo, BillingFilter filter);
+    List<BillingData> getExploratoryRemoteBillingData(UserInfo user, String endpoint, List<UserInstanceDTO> userInstanceDTOS);
 }
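Collapsing the abstract CSV-assembling class into a four-method interface leaves report formatting entirely to implementations. A skeletal implementation sketch (method bodies are placeholders, not DLab's billing logic):

import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.domain.BillingReport;
import com.epam.dlab.backendapi.domain.BillingReportLine;
import com.epam.dlab.backendapi.resources.dto.BillingFilter;
import com.epam.dlab.backendapi.service.BillingService;
import com.epam.dlab.dto.UserInstanceDTO;
import com.epam.dlab.dto.billing.BillingData;
import java.util.Collections;
import java.util.List;

public class BillingServiceSkeleton implements BillingService {
    @Override
    public BillingReport getBillingReport(UserInfo userInfo, BillingFilter filter) {
        throw new UnsupportedOperationException("placeholder");
    }

    @Override
    public String downloadReport(UserInfo userInfo, BillingFilter filter) {
        throw new UnsupportedOperationException("placeholder");
    }

    @Override
    public List<BillingReportLine> getBillingReportLines(UserInfo userInfo, BillingFilter filter) {
        return Collections.emptyList();
    }

    @Override
    public List<BillingData> getExploratoryRemoteBillingData(UserInfo user, String endpoint,
                                                             List<UserInstanceDTO> userInstanceDTOS) {
        return Collections.emptyList();
    }
}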
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingServiceNew.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingServiceNew.java
deleted file mode 100644
index 0ec17a9..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/BillingServiceNew.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.domain.BillingReport;
-import com.epam.dlab.backendapi.domain.BillingReportLine;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.dto.UserInstanceDTO;
-import com.epam.dlab.dto.billing.BillingData;
-
-import java.util.List;
-
-public interface BillingServiceNew {
-    BillingReport getBillingReport(UserInfo userInfo, BillingFilter filter);
-
-    String downloadReport(UserInfo userInfo, BillingFilter filter);
-
-    List<BillingReportLine> getBillingReportLines(UserInfo userInfo, BillingFilter filter);
-
-    List<BillingData> getExploratoryRemoteBillingData(UserInfo user, String endpoint, List<UserInstanceDTO> userInstanceDTOS);
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
index 217e18e..4a6f392 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
@@ -21,16 +21,17 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
-import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 
 import java.util.List;
 import java.util.Optional;
 
 public interface ComputationalService {
+	ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint);
+
 	/**
 	 * Asynchronously triggers creation of Spark cluster
 	 *
@@ -46,29 +47,25 @@
 	 * Asynchronously triggers termination of computational resources
 	 *
 	 * @param userInfo          user info of authenticated user
+	 * @param project           project name
 	 * @param exploratoryName   name of exploratory where to terminate computational resources with
 	 *                          <code>computationalName</code>
 	 * @param computationalName computational name
 	 */
-	void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName);
+	void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 
 	boolean createDataEngineService(UserInfo userInfo, ComputationalCreateFormDTO formDTO, UserComputationalResource
 			computationalResource, String project);
 
-	void stopSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName);
+	void stopSparkCluster(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 
 	void startSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName, String project);
 
-	void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+	void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
 								  List<ClusterConfig> config);
 
-	void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
-											 List<DataEngineType> computationalTypes,
-											 boolean reuploadKeyRequired,
-											 UserInstanceStatus... computationalStatuses);
-
-	Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+	Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
 																 String computationalName);
 
-	List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName);
+	List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
index 5ef7cae..c605131 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
@@ -24,29 +24,23 @@
 import com.epam.dlab.backendapi.resources.dto.UserResourceInfo;
 
 import java.util.List;
-import java.util.Set;
 
 public interface EnvironmentService {
-
-	Set<String> getUserNames();
-
 	List<UserDTO> getUsers();
 
-	List<UserResourceInfo> getAllEnv();
+	List<UserResourceInfo> getAllEnv(UserInfo user);
 
 	void stopAll();
 
-	void stopEnvironment(UserInfo userInfo, String user);
-
 	void stopEnvironmentWithServiceAccount(String user);
 
 	void stopProjectEnvironment(String project);
 
-	void stopExploratory(UserInfo userInfo, String user, String exploratoryName);
+	void stopExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
 
-	void stopComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName);
+	void stopComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
 
-	void terminateExploratory(UserInfo userInfo, String user, String exploratoryName);
+	void terminateExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
 
-	void terminateComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName);
+	void terminateComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
index f744bc5..5a43fa6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
@@ -21,6 +21,7 @@
 
 
 import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.ExploratoryCreatePopUp;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
@@ -29,34 +30,29 @@
 
 import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 
 public interface ExploratoryService {
 
-	String start(UserInfo userInfo, String exploratoryName, String project);
+    String start(UserInfo userInfo, String exploratoryName, String project);
 
-	String stop(UserInfo userInfo, String exploratoryName);
+    String stop(UserInfo userInfo, String project, String exploratoryName);
 
-	String terminate(UserInfo userInfo, String exploratoryName);
+    String terminate(UserInfo userInfo, String project, String exploratoryName);
 
-	String create(UserInfo userInfo, Exploratory exploratory, String project);
+    String create(UserInfo userInfo, Exploratory exploratory, String project);
 
-	void updateExploratoryStatuses(String user, UserInstanceStatus status);
+    void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
 
-	void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
+    void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config);
 
-	void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
-											UserInstanceStatus... exploratoryStatuses);
+    Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName);
 
-	List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
-												   UserInstanceStatus computationalStatus);
+    List<UserInstanceDTO> findAll();
 
-	List<UserInstanceDTO> findAll();
+    List<UserInstanceDTO> findAll(Set<ProjectDTO> projects);
 
-	void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config);
+    List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName);
 
-	Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName);
-
-	List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName);
-
-	ExploratoryCreatePopUp getUserInstances(UserInfo user);
+    ExploratoryCreatePopUp getUserInstances(UserInfo user);
 }
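
findAll(Set<ProjectDTO>) narrows the instance scan to a given set of projects, which is what the billing rework further down relies on. A short sketch (hypothetical wiring):

    import com.epam.dlab.backendapi.domain.ProjectDTO;
    import com.epam.dlab.backendapi.service.ExploratoryService;
    import com.epam.dlab.dto.UserInstanceDTO;

    import java.util.List;
    import java.util.Set;

    // Hypothetical helper: fetch only the instances belonging to the projects
    // visible to the current caller.
    class ProjectScopedInstances {
        private final ExploratoryService exploratoryService;

        ProjectScopedInstances(ExploratoryService exploratoryService) {
            this.exploratoryService = exploratoryService;
        }

        List<UserInstanceDTO> visibleInstances(Set<ProjectDTO> projects) {
            return exploratoryService.findAll(projects);
        }
    }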
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
index 5091c97..604bdcf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
@@ -27,13 +27,13 @@
 
 public interface ImageExploratoryService {
 
-	String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription);
+    String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription);
 
-	void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
+    void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
 
-	List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
+    List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
 
-	ImageInfoRecord getImage(String user, String name, String project, String endpoint);
+    ImageInfoRecord getImage(String user, String name, String project, String endpoint);
 
-	List<ImageInfoRecord> getImagesForProject(String project);
+    List<ImageInfoRecord> getImagesForProject(String project);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
index 7b5cd44..038a7b6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
@@ -24,10 +24,10 @@
 
 public interface InactivityService {
 
-	void updateRunningResourcesLastActivity();
+    void updateRunningResourcesLastActivity();
 
-	void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
+    void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
 
-	void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
-											String computationalName, LocalDateTime lastActivity);
+    void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
+                                            String computationalName, LocalDateTime lastActivity);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
index a385925..ffb3531 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InfrastructureInfoService.java
@@ -29,7 +29,7 @@
 public interface InfrastructureInfoService {
 	List<ProjectInfrastructureInfo> getUserResources(UserInfo user);
 
-	HealthStatusPageDTO getHeathStatus(UserInfo user, boolean fullReport, boolean isAdmin);
+	HealthStatusPageDTO getHeathStatus(UserInfo user, boolean fullReport);
 
 	InfrastructureMetaInfoDTO getInfrastructureMetaInfo();
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
index 5b98293..bdd22f1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
@@ -27,12 +27,12 @@
 import java.util.List;
 
 public interface LibraryService {
-	List<Document> getLibs(String user, String exploratoryName, String computationalName);
+    List<Document> getLibs(String user, String project, String exploratoryName, String computationalName);
 
-	List<LibInfoRecord> getLibInfo(String user, String exploratoryName);
+    List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName);
 
-	String installComputationalLibs(UserInfo userInfo, String exploratoryName, String computationalName,
-									List<LibInstallDTO> libs);
+    String installComputationalLibs(UserInfo userInfo, String project, String exploratoryName, String computationalName,
+                                    List<LibInstallDTO> libs);
 
-	String installExploratoryLibs(UserInfo userInfo, String exploratoryName, List<LibInstallDTO> libs);
+    String installExploratoryLibs(UserInfo userInfo, String project, String exploratoryName, List<LibInstallDTO> libs);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
index 738fbdb..fa0aedc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
@@ -2,7 +2,6 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
-import com.epam.dlab.backendapi.domain.ProjectManagingDTO;
 import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
 
 import java.util.List;
@@ -10,12 +9,10 @@
 public interface ProjectService {
 	List<ProjectDTO> getProjects();
 
-	List<ProjectManagingDTO> getProjectsForManaging();
+	List<ProjectDTO> getProjects(UserInfo user);
 
 	List<ProjectDTO> getUserProjects(UserInfo userInfo, boolean active);
 
-	List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status);
-
 	List<ProjectDTO> getProjectsByEndpoint(String endpointName);
 
 	void create(UserInfo userInfo, ProjectDTO projectDTO);
@@ -26,21 +23,15 @@
 
 	void terminateEndpoint(UserInfo userInfo, List<String> endpoints, String name);
 
-	void terminateProject(UserInfo userInfo, String name);
-
 	void start(UserInfo userInfo, String endpoint, String name);
 
 	void start(UserInfo userInfo, List<String> endpoints, String name);
 
 	void stop(UserInfo userInfo, String endpoint, String name);
 
-	void stop(UserInfo userInfo, List<String> endpoints, String name);
+	void stopWithResources(UserInfo userInfo, List<String> endpoints, String projectName);
 
-	void stopWithResources(UserInfo userInfo, String projectName);
-
-	void update(UserInfo userInfo, UpdateProjectDTO projectDTO);
-
-	void updateBudget(String project, Integer budget);
+	void update(UserInfo userInfo, UpdateProjectDTO projectDTO, String projectName);
 
 	void updateBudget(List<ProjectDTO> projects);
 
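stopWithResources now receives the endpoint list explicitly instead of resolving it internally, and update is keyed by the project name alongside the DTO. A sketch of the reworked call (hypothetical wrapper):

    import com.epam.dlab.auth.UserInfo;
    import com.epam.dlab.backendapi.service.ProjectService;

    import java.util.List;

    // Hypothetical usage: stop a project, with its resources, on a chosen
    // set of endpoints.
    class ProjectOps {
        private final ProjectService projectService;

        ProjectOps(ProjectService projectService) {
            this.projectService = projectService;
        }

        void stopProjectEverywhere(UserInfo admin, String projectName, List<String> endpoints) {
            projectService.stopWithResources(admin, endpoints, projectName);
        }
    }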
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
index 1059db0..7702601 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
@@ -25,57 +25,61 @@
 import java.util.List;
 
 public interface SchedulerJobService {
-	/**
-	 * Pulls out scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user            user's name
-	 * @param exploratoryName name of exploratory resource
-	 * @return dto object
-	 */
-	SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName);
+    /**
+     * Pulls out scheduler job data for user <code>user</code> and his exploratory <code>exploratoryName</code>
+     *
+     * @param user            user's name
+     * @param project         project name
+     * @param exploratoryName name of exploratory resource
+     * @return dto object
+     */
+    SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName);
 
-	/**
-	 * Pulls out scheduler job data for computational resource <code>computationalName<code/> affiliated with
-	 * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user              user's name
-	 * @param exploratoryName   name of exploratory resource
-	 * @param computationalName name of computational resource
-	 * @return dto object
-	 */
-	SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
-															  String computationalName);
+    /**
+     * Pulls out scheduler job data for computational resource <code>computationalName</code> affiliated with
+     * user <code>user</code> and his exploratory <code>exploratoryName</code>
+     *
+     * @param user              user's name
+     * @param project           project name
+     * @param exploratoryName   name of exploratory resource
+     * @param computationalName name of computational resource
+     * @return dto object
+     */
+    SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
+                                                              String computationalName);
 
-	/**
-	 * Updates scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user            user's name
-	 * @param exploratoryName name of exploratory resource
-	 * @param dto             scheduler job data
-	 */
-	void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto);
+    /**
+     * Updates scheduler job data for user <code>user</code> and his exploratory <code>exploratoryName</code>
+     *
+     * @param user            user's name
+     * @param project         project name
+     * @param exploratoryName name of exploratory resource
+     * @param dto             scheduler job data
+     */
+    void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto);
 
-	/**
-	 * Updates scheduler job data for computational resource <code>computationalName<code/> affiliated with
-	 * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
-	 *
-	 * @param user              user's name
-	 * @param exploratoryName   name of exploratory resource
-	 * @param computationalName name of computational resource
-	 * @param dto               scheduler job data
-	 */
-	void updateComputationalSchedulerData(String user, String exploratoryName,
-										  String computationalName, SchedulerJobDTO dto);
+    /**
+     * Updates scheduler job data for computational resource <code>computationalName</code> affiliated with
+     * user <code>user</code> and his exploratory <code>exploratoryName</code>
+     *
+     * @param user              user's name
+     * @param project           project name
+     * @param exploratoryName   name of exploratory resource
+     * @param computationalName name of computational resource
+     * @param dto               scheduler job data
+     */
+    void updateComputationalSchedulerData(String user, String project, String exploratoryName,
+                                          String computationalName, SchedulerJobDTO dto);
 
-	void stopComputationalByScheduler();
+    void stopComputationalByScheduler();
 
-	void stopExploratoryByScheduler();
+    void stopExploratoryByScheduler();
 
-	void startExploratoryByScheduler();
+    void startExploratoryByScheduler();
 
-	void startComputationalByScheduler();
+    void startComputationalByScheduler();
 
-	void terminateExploratoryByScheduler();
+    void terminateExploratoryByScheduler();
 
 	void terminateComputationalByScheduler();
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java
deleted file mode 100644
index da224ab..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ShapeFormat.java
+++ /dev/null
@@ -1,5 +0,0 @@
-package com.epam.dlab.backendapi.service;
-
-public interface ShapeFormat {
-    String format();
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
index e9e3e47..94e89e3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserGroupService.java
@@ -18,6 +18,7 @@
  */
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 
 import java.util.List;
@@ -26,17 +27,10 @@
 public interface UserGroupService {
 
 	void createGroup(String group, Set<String> roleIds, Set<String> users);
-	void updateGroup(String group, Set<String> roleIds, Set<String> users);
 
-	void addUsersToGroup(String group, Set<String> users);
-
-	void updateRolesForGroup(String group, Set<String> roleIds);
-
-	void removeUserFromGroup(String group, String user);
-
-	void removeGroupFromRole(Set<String> groups, Set<String> roleIds);
+	void updateGroup(UserInfo user, String group, Set<String> roleIds, Set<String> users);
 
 	void removeGroup(String groupId);
 
-	List<UserGroupDto> getAggregatedRolesByGroup();
+	List<UserGroupDto> getAggregatedRolesByGroup(UserInfo user);
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleService.java
index 0b22b1d..a010684 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleService.java
@@ -18,13 +18,14 @@
  */
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
 
 import java.util.List;
 
 public interface UserRoleService {
 
-	List<UserRoleDto> getUserRoles();
+	List<UserRoleDto> getUserRoles(UserInfo userInfo);
 
 	void createRole(UserRoleDto dto);
 
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
index ce9462f..6940533 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/UserRoleServiceImpl.java
@@ -18,8 +18,11 @@
  */
 package com.epam.dlab.backendapi.service;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -27,16 +30,29 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 @Singleton
 public class UserRoleServiceImpl implements UserRoleService {
 	private static final String ROLE_NOT_FOUND_MSG = "Any of role : %s were not found";
+	private static final String ADMIN = "admin";
+
 	@Inject
 	private UserRoleDao userRoleDao;
 
 	@Override
-	public List<UserRoleDto> getUserRoles() {
-		return userRoleDao.findAll();
+	public List<UserRoleDto> getUserRoles(UserInfo user) {
+		List<UserRoleDto> all = userRoleDao.findAll();
+		if (UserRoles.isAdmin(user)) {
+			return all;
+		} else if (UserRoles.isProjectAdmin(user)) {
+			return all
+					.stream()
+					.filter(role -> !role.getId().equalsIgnoreCase(ADMIN))
+					.collect(Collectors.toList());
+		} else {
+			throw new DlabException(String.format("User %s doesn't have appropriate permission", user.getName()));
+		}
 	}
 
 	@Override
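
The role listing is now permission-sensitive: full admins get every role, project admins get everything except the "admin" role itself, and any other caller is rejected with a DlabException. A behavioral sketch (hypothetical check class):

    import com.epam.dlab.auth.UserInfo;
    import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
    import com.epam.dlab.backendapi.service.UserRoleService;

    import java.util.List;

    // Hypothetical invariant: a project admin never sees the "admin" role.
    class RoleVisibilityCheck {
        private final UserRoleService userRoleService;

        RoleVisibilityCheck(UserRoleService userRoleService) {
            this.userRoleService = userRoleService;
        }

        boolean adminRoleHidden(UserInfo projectAdmin) {
            List<UserRoleDto> visible = userRoleService.getUserRoles(projectAdmin);
            return visible.stream().noneMatch(role -> "admin".equalsIgnoreCase(role.getId()));
        }
    }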
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java
deleted file mode 100644
index eb94ea5..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/aws/AwsBillingService.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.model.aws.ReportLine;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-@Slf4j
-@Singleton
-public class AwsBillingService extends BillingService {
-
-	@Override
-	public String getReportFileName(UserInfo userInfo, BillingFilter filter) {
-		return "aws-billing-report.csv";
-	}
-
-	public String getFirstLine(Document document) throws ParseException {
-
-		SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-		SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-		return String.format("Service base name: %s  " +
-						"Resource tag ID: %s  " +
-						"Available reporting period from: %s to: %s",
-				document.get(AwsBillingDAO.SERVICE_BASE_NAME), document.get(AwsBillingDAO.TAG_RESOURCE_ID),
-				to.format(from.parse((String) document.get(AwsBillingDAO.USAGE_DATE_START))),
-				to.format(from.parse((String) document.get(AwsBillingDAO.USAGE_DATE_END))));
-
-	}
-
-	public List<String> getHeadersList(boolean full) {
-		List<String> headers = new ArrayList<>();
-
-		if (full) {
-			headers.add("USER");
-		}
-
-		headers.add("PROJECT");
-		headers.add("ENVIRONMENT NAME");
-		headers.add("RESOURCE TYPE");
-		headers.add("SHAPE");
-		headers.add("SERVICE");
-		headers.add("SERVICE CHARGES");
-
-		return headers;
-	}
-
-	public String getLine(boolean full, Document document) {
-		List<String> items = new ArrayList<>();
-
-		if (full) {
-			items.add(getValueOrEmpty(document, ReportLine.FIELD_USER_ID));
-		}
-
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_PROJECT));
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_DLAB_ID));
-		items.add(getValueOrEmpty(document, AwsBillingDAO.DLAB_RESOURCE_TYPE));
-		items.add(getValueOrEmpty(document, AwsBillingDAO.SHAPE).replace(System.lineSeparator(), " "));
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_PRODUCT));
-
-		items.add(getValueOrEmpty(document, ReportLine.FIELD_COST)
-				+ " " + getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE));
-
-		return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-	}
-
-	public String getTotal(boolean full, Document document) {
-		int padding = getHeadersList(full).size() - 1;
-
-		List<String> items = new ArrayList<>();
-		while (padding-- > 0) {
-			items.add("");
-		}
-
-		items.add(String.format("Total: %s %s", getValueOrEmpty(document, AwsBillingDAO.COST_TOTAL),
-				getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE)));
-
-		return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-
-	}
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java
deleted file mode 100644
index 9ff33a8..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/azure/AzureBillingService.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.MongoKeyWords;
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BaseBillingDAO;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.dao.azure.AzureBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.model.aws.ReportLine;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-@Slf4j
-@Singleton
-public class AzureBillingService extends BillingService {
-
-    @Inject
-    private BillingDAO billingDAO;
-
-    @Override
-    public String getReportFileName(UserInfo userInfo, BillingFilter filter) {
-        return "azure-billing-report.csv";
-    }
-
-    @Override
-    public String getFirstLine(Document document) throws ParseException {
-        SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-        SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-        return String.format("Service base name: %s  " +
-                        "Available reporting period from: %s to: %s",
-                document.get(BaseBillingDAO.SERVICE_BASE_NAME),
-                to.format(from.parse((String) document.get(MongoKeyWords.USAGE_FROM))),
-                to.format(from.parse((String) document.get(MongoKeyWords.USAGE_TO))));
-    }
-
-    public List<String> getHeadersList(boolean full) {
-        List<String> headers = new ArrayList<>();
-
-        if (full) {
-            headers.add("USER");
-        }
-
-        headers.add("PROJECT");
-        headers.add("ENVIRONMENT NAME");
-        headers.add("RESOURCE TYPE");
-        headers.add("INSTANCE SIZE");
-        headers.add("CATEGORY");
-        headers.add("SERVICE CHARGES");
-
-        return headers;
-    }
-
-    @Override
-    public String getLine(boolean full, Document document) {
-        List<String> items = new ArrayList<>();
-
-        if (full) {
-            items.add(getValueOrEmpty(document, MongoKeyWords.DLAB_USER));
-        }
-
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_PROJECT));
-        items.add(getValueOrEmpty(document, MongoKeyWords.DLAB_ID));
-        items.add(getValueOrEmpty(document, MongoKeyWords.RESOURCE_TYPE));
-        items.add(getValueOrEmpty(document, AzureBillingDAO.SIZE).replace(System.lineSeparator(), " "));
-        items.add(getValueOrEmpty(document, MongoKeyWords.METER_CATEGORY));
-
-        items.add(getValueOrEmpty(document, MongoKeyWords.COST_STRING)
-                + " " + getValueOrEmpty(document, MongoKeyWords.CURRENCY_CODE));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getTotal(boolean full, Document document) {
-        int padding = getHeadersList(full).size() - 1;
-
-        List<String> items = new ArrayList<>();
-        while (padding-- > 0) {
-            items.add("");
-        }
-
-        items.add(String.format("Total: %s %s", getValueOrEmpty(document, MongoKeyWords.COST_STRING),
-                getValueOrEmpty(document, MongoKeyWords.CURRENCY_CODE)));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java
deleted file mode 100644
index a7599f7..0000000
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/gcp/GcpBillingService.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.gcp;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.backendapi.service.BillingService;
-import com.epam.dlab.backendapi.util.CSVFormatter;
-import com.epam.dlab.model.aws.ReportLine;
-import org.bson.Document;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-
-public class GcpBillingService extends BillingService {
-    @Override
-    public String getFirstLine(Document document) throws ParseException {
-        SimpleDateFormat from = new SimpleDateFormat("yyyy-MM-dd");
-        SimpleDateFormat to = new SimpleDateFormat("MMM dd, yyyy");
-
-        return String.format("Service base name: %s Available reporting period from: %s to: %s",
-                document.get(AwsBillingDAO.SERVICE_BASE_NAME),
-                to.format(from.parse((String) document.get("from"))),
-                to.format(from.parse((String) document.get("to"))));
-    }
-
-    @Override
-    public List<String> getHeadersList(boolean full) {
-        List<String> headers = new ArrayList<>();
-
-        if (full) {
-            headers.add("USER");
-        }
-
-        headers.add("PROJECT");
-        headers.add("ENVIRONMENT NAME");
-        headers.add("RESOURCE TYPE");
-        headers.add("SHAPE");
-        headers.add("SERVICE");
-        headers.add("SERVICE CHARGES");
-
-        return headers;
-    }
-
-    @Override
-    public String getLine(boolean full, Document document) {
-        List<String> items = new ArrayList<>();
-
-        if (full) {
-            items.add(getValueOrEmpty(document, ReportLine.FIELD_USER_ID));
-        }
-
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_PROJECT));
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_DLAB_ID));
-        items.add(getValueOrEmpty(document, AwsBillingDAO.DLAB_RESOURCE_TYPE));
-        items.add(getValueOrEmpty(document, AwsBillingDAO.SHAPE).replace(System.lineSeparator(), " "));
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_PRODUCT));
-
-        items.add(getValueOrEmpty(document, ReportLine.FIELD_COST)
-                + " " + getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getTotal(boolean full, Document document) {
-        int padding = getHeadersList(full).size() - 1;
-
-        List<String> items = new ArrayList<>();
-        while (padding-- > 0) {
-            items.add("");
-        }
-
-        items.add(String.format("Total: %s %s", getValueOrEmpty(document, AwsBillingDAO.COST_TOTAL),
-                getValueOrEmpty(document, ReportLine.FIELD_CURRENCY_CODE)));
-
-        return CSVFormatter.formatLine(items, CSVFormatter.SEPARATOR);
-    }
-
-    @Override
-    public String getReportFileName(UserInfo userInfo, BillingFilter filter) {
-        return "gcp-billing-report.csv";
-    }
-}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImplNew.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java
similarity index 77%
rename from services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImplNew.java
rename to services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java
index 0f26d32..0d77367 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImplNew.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/BillingServiceImpl.java
@@ -21,6 +21,7 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
+import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
 import com.epam.dlab.backendapi.domain.BillingReport;
 import com.epam.dlab.backendapi.domain.BillingReportLine;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
@@ -29,7 +30,7 @@
 import com.epam.dlab.backendapi.resources.dto.BillingFilter;
 import com.epam.dlab.backendapi.roles.RoleType;
 import com.epam.dlab.backendapi.roles.UserRoles;
-import com.epam.dlab.backendapi.service.BillingServiceNew;
+import com.epam.dlab.backendapi.service.BillingService;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.ExploratoryService;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -44,7 +45,6 @@
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.collections4.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.http.client.utils.URIBuilder;
 
 import javax.ws.rs.core.GenericType;
@@ -56,9 +56,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -69,7 +71,7 @@
 import java.util.stream.Stream;
 
 @Slf4j
-public class BillingServiceImplNew implements BillingServiceNew {
+public class BillingServiceImpl implements BillingService {
     private static final String BILLING_PATH = "/api/billing";
     private static final String BILLING_REPORT_PATH = "/api/billing/report";
 
@@ -78,16 +80,20 @@
     private final ExploratoryService exploratoryService;
     private final SelfServiceApplicationConfiguration configuration;
     private final RESTService provisioningService;
+    private final ImageExploratoryDao imageExploratoryDao;
+    private final String sbn;
 
     @Inject
-    public BillingServiceImplNew(ProjectService projectService, EndpointService endpointService,
-                                 ExploratoryService exploratoryService, SelfServiceApplicationConfiguration configuration,
-                                 @Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService) {
+    public BillingServiceImpl(ProjectService projectService, EndpointService endpointService,
+                              ExploratoryService exploratoryService, SelfServiceApplicationConfiguration configuration,
+                              @Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService, ImageExploratoryDao imageExploratoryDao) {
         this.projectService = projectService;
         this.endpointService = endpointService;
         this.exploratoryService = exploratoryService;
         this.configuration = configuration;
         this.provisioningService = provisioningService;
+        this.imageExploratoryDao = imageExploratoryDao;
+        sbn = configuration.getServiceBaseName();
     }
 
     @Override
@@ -98,7 +104,7 @@
         double sum = billingReportLines.stream().mapToDouble(BillingReportLine::getCost).sum();
         String currency = billingReportLines.stream().map(BillingReportLine::getCurrency).distinct().count() == 1 ? billingReportLines.get(0).getCurrency() : null;
         return BillingReport.builder()
-                .sbn(configuration.getServiceBaseName())
+                .sbn(sbn)
                 .reportLines(billingReportLines)
                 .usageDateFrom(min)
                 .usageDateTo(max)
@@ -126,28 +132,15 @@
     @Override
     public List<BillingReportLine> getBillingReportLines(UserInfo user, BillingFilter filter) {
         setUserFilter(user, filter);
-        final String serviceBaseName = configuration.getServiceBaseName();
-        final Stream<BillingReportLine> ssnBillingDataStream = BillingUtils.ssnBillingDataStream(serviceBaseName);
-        final Stream<BillingReportLine> billableUserInstances = exploratoryService.findAll()
-                .stream()
-                .filter(userInstance -> Objects.nonNull(userInstance.getExploratoryId()))
-                .flatMap(ui -> BillingUtils.exploratoryBillingDataStream(ui, configuration.getMaxSparkInstanceCount()));
-        final Stream<BillingReportLine> billableEdges = projectService.getProjects()
-                .stream()
-                .collect(Collectors.toMap(ProjectDTO::getName, ProjectDTO::getEndpoints))
-                .entrySet()
-                .stream()
-                .flatMap(e -> projectEdges(serviceBaseName, e.getKey(), e.getValue()));
+        Set<ProjectDTO> projects = new HashSet<>(projectService.getProjects(user));
+        projects.addAll(projectService.getUserProjects(user, false));
 
-        final Map<String, BillingReportLine> billableResources = Stream.of(billableUserInstances, billableEdges, ssnBillingDataStream)
-                .flatMap(s -> s)
-                .collect(Collectors.toMap(BillingReportLine::getDlabId, b -> b));
-        log.debug("Billable resources are: {}", billableResources);
+        final Map<String, BillingReportLine> billableResources = getBillableResources(user, projects);
 
-        List<BillingReportLine> billingReport = getRemoteBillingData(user)
+        List<BillingReportLine> billingReport = getRemoteBillingData(user, filter)
                 .stream()
-                .filter(getBillingDataFilter(filter))
-                .map(bd -> toBillingData(bd, getOrDefault(billableResources, bd.getTag())))
+                .filter(bd -> billableResources.containsKey(bd.getTag()))
+                .map(bd -> toBillingData(bd, billableResources.get(bd.getTag())))
                 .filter(getBillingReportFilter(filter))
                 .collect(Collectors.toList());
         log.debug("Billing report: {}", billingReport);
@@ -155,6 +148,42 @@
         return billingReport;
     }
 
+    private Map<String, BillingReportLine> getBillableResources(UserInfo user, Set<ProjectDTO> projects) {
+        Stream<BillingReportLine> billableAdminResources = Stream.empty();
+        final Stream<BillingReportLine> billableUserInstances = exploratoryService.findAll(projects)
+                .stream()
+                .filter(userInstance -> Objects.nonNull(userInstance.getExploratoryId()))
+                .flatMap(ui -> BillingUtils.exploratoryBillingDataStream(ui, configuration.getMaxSparkInstanceCount(), sbn));
+        final Stream<BillingReportLine> billingReportLineStream = projects
+                .stream()
+                .map(p -> imageExploratoryDao.getImagesForProject(p.getName()))
+                .flatMap(Collection::stream)
+                .flatMap(i -> BillingUtils.customImageBillingDataStream(i, sbn));
+
+        if (UserRoles.isAdmin(user)) {
+            final Stream<BillingReportLine> billableEdges = projects
+                    .stream()
+                    .collect(Collectors.toMap(ProjectDTO::getName, ProjectDTO::getEndpoints))
+                    .entrySet()
+                    .stream()
+                    .flatMap(e -> projectEdges(sbn, e.getKey(), e.getValue()));
+            final Stream<BillingReportLine> ssnBillingDataStream = BillingUtils.ssnBillingDataStream(sbn);
+            final Stream<BillingReportLine> billableSharedEndpoints = endpointService.getEndpoints()
+                    .stream()
+                    .flatMap(endpoint -> BillingUtils.sharedEndpointBillingDataStream(endpoint.getName(), sbn));
+
+            billableAdminResources = Stream.of(billableEdges, ssnBillingDataStream, billableSharedEndpoints)
+                    .flatMap(s -> s);
+        }
+
+        final Map<String, BillingReportLine> billableResources = Stream.of(billableUserInstances, billingReportLineStream, billableAdminResources)
+                .flatMap(s -> s)
+                .collect(Collectors.toMap(BillingReportLine::getDlabId, b -> b));
+        log.debug("Billable resources are: {}", billableResources);
+
+        return billableResources;
+    }
+
     private Stream<BillingReportLine> projectEdges(String serviceBaseName, String projectName, List<ProjectEndpointDTO> endpoints) {
         return endpoints
                 .stream()
@@ -162,10 +191,6 @@
                         endpoint.getStatus().toString()));
     }
 
-    private BillingReportLine getOrDefault(Map<String, BillingReportLine> billableResources, String tag) {
-        return billableResources.getOrDefault(tag, BillingReportLine.builder().dlabId(tag).build());
-    }
-
     public List<BillingData> getExploratoryRemoteBillingData(UserInfo user, String endpoint, List<UserInstanceDTO> userInstanceDTOS) {
         List<String> dlabIds = null;
         try {
@@ -189,11 +214,11 @@
         }
     }
 
-    private List<BillingData> getRemoteBillingData(UserInfo userInfo) {
+    private List<BillingData> getRemoteBillingData(UserInfo userInfo, BillingFilter filter) {
         List<EndpointDTO> endpoints = endpointService.getEndpoints();
         ExecutorService executor = Executors.newFixedThreadPool(endpoints.size());
         List<Callable<List<BillingData>>> callableTasks = new ArrayList<>();
-        endpoints.forEach(e -> callableTasks.add(getTask(userInfo, getBillingUrl(e.getUrl(), BILLING_REPORT_PATH))));
+        endpoints.forEach(e -> callableTasks.add(getTask(userInfo, getBillingUrl(e.getUrl(), BILLING_REPORT_PATH), filter)));
 
         List<BillingData> billingData;
         try {
@@ -216,8 +241,8 @@
         try {
             return s.get();
         } catch (InterruptedException | ExecutionException e) {
-            log.error("Cannot retrieve billing information {}", e.getMessage(), e);
-            throw new DlabException("Cannot retrieve billing information");
+            log.error("Cannot retrieve billing information {}", e.getMessage());
+            return Collections.emptyList();
         }
     }
 
@@ -237,17 +262,16 @@
                 .toString();
     }
 
-    private Callable<List<BillingData>> getTask(UserInfo userInfo, String url) {
-        return () -> provisioningService.get(url, userInfo.getAccessToken(), new GenericType<List<BillingData>>() {
-        });
-    }
-
-    private Predicate<BillingData> getBillingDataFilter(BillingFilter filter) {
-        return br ->
-                (StringUtils.isEmpty(filter.getDlabId()) || StringUtils.containsIgnoreCase(br.getTag(), filter.getDlabId())) &&
-                        (StringUtils.isEmpty(filter.getDateStart()) || LocalDate.parse(filter.getDateStart()).isEqual(br.getUsageDateFrom()) || LocalDate.parse(filter.getDateStart()).isBefore(br.getUsageDateFrom())) &&
-                        (StringUtils.isEmpty(filter.getDateEnd()) || LocalDate.parse(filter.getDateEnd()).isEqual(br.getUsageDateTo()) || LocalDate.parse(filter.getDateEnd()).isAfter(br.getUsageDateTo())) &&
-                        (CollectionUtils.isEmpty(filter.getProducts()) || filter.getProducts().contains(br.getProduct()));
+    private Callable<List<BillingData>> getTask(UserInfo userInfo, String url, BillingFilter filter) {
+        return () -> provisioningService.get(url, userInfo.getAccessToken(),
+                new GenericType<List<BillingData>>() {
+                },
+                Stream.of(new String[][]{
+                        {"date-start", filter.getDateStart()},
+                        {"date-end", filter.getDateEnd()},
+                        {"dlab-id", filter.getDlabId()},
+                        {"product", String.join(",", filter.getProducts())}
+                }).collect(Collectors.toMap(data -> data[0], data -> data[1])));
     }
 
     private Predicate<BillingReportLine> getBillingReportFilter(BillingFilter filter) {
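
The old client-side getBillingDataFilter predicate is gone; the filter now travels to each endpoint as query parameters. One caveat worth flagging: Collectors.toMap rejects null values, so an unset date range or dlab-id in the filter would make the new getTask throw a NullPointerException. A null-tolerant sketch of the same parameter map (hypothetical helper; it assumes getProducts returns a List<String>, as its use with String.join above suggests):

    import com.epam.dlab.backendapi.resources.dto.BillingFilter;

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    final class BillingParams {
        // Same keys as the new getTask, but tolerant of unset filter fields.
        static Map<String, String> from(BillingFilter filter) {
            Map<String, String> params = new HashMap<>();
            params.put("date-start", Optional.ofNullable(filter.getDateStart()).orElse(""));
            params.put("date-end", Optional.ofNullable(filter.getDateEnd()).orElse(""));
            params.put("dlab-id", Optional.ofNullable(filter.getDlabId()).orElse(""));
            params.put("product", String.join(",", Optional.ofNullable(filter.getProducts()).orElse(Collections.emptyList())));
            return params;
        }
    }

For the same reason, the new catch branch above that swallows InterruptedException and returns an empty list would conventionally re-interrupt the thread (Thread.currentThread().interrupt()) before returning.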
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
index 52decf8..722ee4d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
@@ -29,9 +29,11 @@
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
 import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
 import com.epam.dlab.backendapi.service.ComputationalService;
 import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
 import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.TagService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
@@ -41,7 +43,12 @@
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.epam.dlab.rest.client.RESTService;
@@ -51,12 +58,19 @@
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Collection;
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RECONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 import static com.epam.dlab.dto.base.DataEngineType.CLOUD_SERVICE;
 import static com.epam.dlab.dto.base.DataEngineType.SPARK_STANDALONE;
 import static com.epam.dlab.rest.contracts.ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC;
@@ -78,24 +92,54 @@
 		DATA_ENGINE_TYPE_TERMINATE_URLS.put(CLOUD_SERVICE, ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC);
 	}
 
-	@Inject
-	private ProjectService projectService;
-	@Inject
-	private ExploratoryDAO exploratoryDAO;
-	@Inject
-	private ComputationalDAO computationalDAO;
-	@Inject
-	@Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
-	private RESTService provisioningService;
-	@Inject
-	private RequestBuilder requestBuilder;
-	@Inject
-	private RequestId requestId;
-	@Inject
-	private TagService tagService;
-	@Inject
-	private EndpointService endpointService;
+	private final ProjectService projectService;
+	private final ExploratoryDAO exploratoryDAO;
+	private final ComputationalDAO computationalDAO;
+	private final RESTService provisioningService;
+	private final RequestBuilder requestBuilder;
+	private final RequestId requestId;
+	private final TagService tagService;
+	private final EndpointService endpointService;
+	private final InfrastructureTemplateService templateService;
 
+	@Inject
+	public ComputationalServiceImpl(ProjectService projectService, ExploratoryDAO exploratoryDAO, ComputationalDAO computationalDAO,
+									@Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService,
+									RequestBuilder requestBuilder, RequestId requestId, TagService tagService,
+									EndpointService endpointService, InfrastructureTemplateService templateService) {
+		this.projectService = projectService;
+		this.exploratoryDAO = exploratoryDAO;
+		this.computationalDAO = computationalDAO;
+		this.provisioningService = provisioningService;
+		this.requestBuilder = requestBuilder;
+		this.requestId = requestId;
+		this.tagService = tagService;
+		this.endpointService = endpointService;
+		this.templateService = templateService;
+	}
+
+
+	@Override
+	public ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint) {
+		List<FullComputationalTemplate> computationalTemplates = templateService.getComputationalTemplates(user, project, endpoint);
+		List<UserInstanceDTO> userInstances = exploratoryDAO.fetchExploratoryFieldsForProjectWithComp(project);
+
+		List<String> projectComputations = userInstances
+				.stream()
+				.map(UserInstanceDTO::getResources)
+				.flatMap(Collection::stream)
+				.map(UserComputationalResource::getComputationalName)
+				.collect(Collectors.toList());
+		List<String> userComputations = userInstances
+				.stream()
+				.filter(instance -> instance.getUser().equalsIgnoreCase(user.getName()))
+				.map(UserInstanceDTO::getResources)
+				.flatMap(Collection::stream)
+				.map(UserComputationalResource::getComputationalName)
+				.collect(Collectors.toList());
+
+		return new ComputationalTemplatesDTO(computationalTemplates, userComputations, projectComputations);
+	}
 
 	@BudgetLimited
 	@Override
@@ -103,11 +147,11 @@
 
 		final ProjectDTO projectDTO = projectService.get(project);
 		final UserInstanceDTO instance =
-				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), form.getNotebookName());
+				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, form.getNotebookName());
 		final SparkStandaloneClusterResource compResource = createInitialComputationalResource(form);
 		compResource.setTags(tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
 				form.getCustomTag()));
-		if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), compResource)) {
+		if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), project, compResource)) {
 			try {
 				EndpointDTO endpointDTO = endpointService.get(instance.getEndpoint());
 				ComputationalBase<?> dto = requestBuilder.newComputationalCreate(userInfo, projectDTO, instance, form, endpointDTO);
@@ -119,7 +163,7 @@
 				return true;
 			} catch (RuntimeException e) {
 				try {
-					updateComputationalStatus(userInfo.getName(), form.getNotebookName(), form.getName(), FAILED);
+					updateComputationalStatus(userInfo.getName(), project, form.getNotebookName(), form.getName(), FAILED);
 				} catch (DlabException d) {
 					log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, form.getName(), userInfo.getName(), d);
 				}
@@ -133,15 +177,15 @@
 	}
 
 	@Override
-	public void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName) {
+	public void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
 		try {
 
-			updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, TERMINATING);
+			updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, TERMINATING);
 
-			final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+			final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project,
 					exploratoryName);
-			UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo
-					.getName(), exploratoryName, computationalName);
+			UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo.getName(), project,
+					exploratoryName, computationalName);
 
 			final DataEngineType dataEngineType = compResource.getDataEngineType();
 			EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
@@ -156,7 +200,7 @@
 		} catch (RuntimeException re) {
 
 			try {
-				updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, FAILED);
+				updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, FAILED);
 			} catch (DlabException e) {
 				log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, computationalName, userInfo.getName(), e);
 			}
@@ -171,12 +215,12 @@
 										   UserComputationalResource computationalResource, @Project String project) {
 
 		final ProjectDTO projectDTO = projectService.get(project);
-		final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO
+		final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, formDTO
 				.getNotebookName());
 		final Map<String, String> tags = tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
 				formDTO.getCustomTag());
 		computationalResource.setTags(tags);
-		boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(),
+		boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(), project,
 				computationalResource);
 
 		if (isAdded) {
@@ -191,8 +235,8 @@
 				return true;
 			} catch (Exception t) {
 				try {
-					updateComputationalStatus(userInfo.getName(), formDTO.getNotebookName(), formDTO.getName(),
-							FAILED);
+					updateComputationalStatus(userInfo.getName(), project, formDTO.getNotebookName(),
+							formDTO.getName(), FAILED);
 				} catch (DlabException e) {
 					log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, formDTO.getName(), userInfo.getName(), e);
 				}
@@ -206,12 +250,12 @@
 	}
 
 	@Override
-	public void stopSparkCluster(UserInfo userInfo, String expName, String compName) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+	public void stopSparkCluster(UserInfo userInfo, String project, String expName, String compName) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
 		final UserInstanceStatus requiredStatus = UserInstanceStatus.RUNNING;
 		if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
 			log.debug("{} spark cluster {} for userInstance {}", STOPPING.toString(), compName, expName);
-			updateComputationalStatus(userInfo.getName(), expName, compName, STOPPING);
+			updateComputationalStatus(userInfo.getName(), project, expName, compName, STOPPING);
 			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
 					provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_STOP_SPARK,
@@ -230,11 +274,11 @@
 	@Override
 	public void startSparkCluster(UserInfo userInfo, String expName, String compName, @Project String project) {
 		final UserInstanceDTO userInstance =
-				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+				exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
 		final UserInstanceStatus requiredStatus = UserInstanceStatus.STOPPED;
 		if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
 			log.debug("{} spark cluster {} for userInstance {}", STARTING.toString(), compName, expName);
-			updateComputationalStatus(userInfo.getName(), expName, compName, STARTING);
+			updateComputationalStatus(userInfo.getName(), project, expName, compName, STARTING);
 			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
 					provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_START_SPARK,
@@ -249,12 +293,12 @@
 	}
 
 	@Override
-	public void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+	public void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
 										 List<ClusterConfig> config) {
 		final String userName = userInfo.getName();
 		final String token = userInfo.getAccessToken();
 		final UserInstanceDTO userInstanceDTO = exploratoryDAO
-				.fetchExploratoryFields(userName, exploratoryName, true);
+				.fetchExploratoryFields(userName, project, exploratoryName, true);
 		final UserComputationalResource compResource = userInstanceDTO
 				.getResources()
 				.stream()
@@ -269,6 +313,7 @@
 				provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_RECONFIGURE_SPARK,
 						token, clusterConfigDto, String.class);
 		computationalDAO.updateComputationalFields(new ComputationalStatusDTO()
+				.withProject(userInstanceDTO.getProject())
 				.withComputationalName(computationalName)
 				.withExploratoryName(exploratoryName)
 				.withConfig(config)
@@ -279,37 +324,19 @@
 	}
 
 	/**
-	 * Updates parameter 'reuploadKeyRequired' for corresponding user's computational resources with allowable statuses
-	 * which are affiliated with exploratories with theirs allowable statuses.
-	 *
-	 * @param user                  user.
-	 * @param exploratoryStatuses   allowable exploratories' statuses.
-	 * @param computationalTypes    type list of computational resource.
-	 * @param reuploadKeyRequired   true/false.
-	 * @param computationalStatuses allowable statuses for computational resources.
-	 */
-	@Override
-	public void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
-													List<DataEngineType> computationalTypes,
-													boolean reuploadKeyRequired,
-													UserInstanceStatus... computationalStatuses) {
-		computationalDAO.updateReuploadKeyFlagForComputationalResources(user, exploratoryStatuses, computationalTypes,
-				reuploadKeyRequired, computationalStatuses);
-	}
-
-	/**
 	 * Returns computational resource's data by name for user's exploratory.
 	 *
-	 * @param user              user.
+	 * @param user              user
+	 * @param project           name of project
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @return corresponding computational resource's data or empty data if resource doesn't exist.
 	 */
 	@Override
-	public Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+	public Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
 																		String computationalName) {
 		try {
-			return Optional.of(computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName));
+			return Optional.of(computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName));
 		} catch (DlabException e) {
 			log.warn("Computational resource {} affiliated with exploratory {} for user {} not found.",
 					computationalName, exploratoryName, user);
@@ -318,22 +345,24 @@
 	}
 
 	@Override
-	public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName) {
-		return computationalDAO.getClusterConfig(userInfo.getName(), exploratoryName, computationalName);
+	public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
+		return computationalDAO.getClusterConfig(userInfo.getName(), project, exploratoryName, computationalName);
 	}
 
 	/**
 	 * Updates the status of computational resource in database.
 	 *
 	 * @param user              user name.
+	 * @param project           project name
 	 * @param exploratoryName   name of exploratory.
 	 * @param computationalName name of computational resource.
 	 * @param status            status
 	 */
-	private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+	private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
 										   UserInstanceStatus status) {
 		ComputationalStatusDTO computationalStatus = new ComputationalStatusDTO()
 				.withUser(user)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withComputationalName(computationalName)
 				.withStatus(status);
@@ -367,5 +396,4 @@
 				compResource.getDataEngineType() == SPARK_STANDALONE &&
 				compResource.getComputationalName().equals(computationalName);
 	}
-
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
index 808f8ae..8b2806b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
@@ -20,6 +20,9 @@
 package com.epam.dlab.backendapi.service.impl;
 
 import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
+import com.epam.dlab.backendapi.annotation.User;
 import com.epam.dlab.backendapi.dao.EnvDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.UserSettingsDAO;
@@ -52,23 +55,29 @@
 @Singleton
 @Slf4j
 public class EnvironmentServiceImpl implements EnvironmentService {
-
 	private static final String ERROR_MSG_FORMAT = "Cannot %s environment because one of user's resources is in status " +
 			"CREATING or STARTING";
+
+	private final EnvDAO envDAO;
+	private final UserSettingsDAO settingsDAO;
+	private final ExploratoryDAO exploratoryDAO;
+	private final ExploratoryService exploratoryService;
+	private final ComputationalService computationalService;
+	private final SecurityService securityService;
+	private final ProjectService projectService;
+
 	@Inject
-	private EnvDAO envDAO;
-	@Inject
-	private ExploratoryDAO exploratoryDAO;
-	@Inject
-	private ExploratoryService exploratoryService;
-	@Inject
-	private ComputationalService computationalService;
-	@Inject
-	private SecurityService securityService;
-	@Inject
-	private ProjectService projectService;
-	@Inject
-	private UserSettingsDAO settingsDAO;
+	public EnvironmentServiceImpl(EnvDAO envDAO, UserSettingsDAO settingsDAO, ExploratoryDAO exploratoryDAO,
+								  ExploratoryService exploratoryService, ComputationalService computationalService,
+								  SecurityService securityService, ProjectService projectService) {
+		this.envDAO = envDAO;
+		this.settingsDAO = settingsDAO;
+		this.exploratoryDAO = exploratoryDAO;
+		this.exploratoryService = exploratoryService;
+		this.computationalService = computationalService;
+		this.securityService = securityService;
+		this.projectService = projectService;
+	}
 
 	@Override
 	public List<UserDTO> getUsers() {
@@ -87,18 +96,13 @@
 	}
 
 	@Override
-	public Set<String> getUserNames() {
-		log.debug("Getting all users...");
-		return envDAO.fetchAllUsers();
-	}
-
-	@Override
-	public List<UserResourceInfo> getAllEnv() {
+	public List<UserResourceInfo> getAllEnv(UserInfo user) {
 		log.debug("Getting all user's environment...");
 		List<UserInstanceDTO> expList = exploratoryDAO.getInstances();
-		return projectService.getProjects()
+		return projectService.getProjects(user)
 				.stream()
-				.map(projectDTO -> getProjectEnv(projectDTO, expList)).flatMap(Collection::stream)
+				.map(projectDTO -> getProjectEnv(projectDTO, expList))
+				.flatMap(Collection::stream)
 				.collect(toList());
 	}
 
@@ -112,14 +116,6 @@
 	}
 
 	@Override
-	public void stopEnvironment(UserInfo userInfo, String user) {
-		log.debug("Stopping environment for user {}", user);
-		checkState(user, "stop");
-		exploratoryDAO.fetchRunningExploratoryFields(user)
-				.forEach(e -> stopExploratory(userInfo, user, e.getExploratoryName()));
-	}
-
-	@Override
 	public void stopEnvironmentWithServiceAccount(String user) {
 		log.debug("Stopping environment for user {} by scheduler", user);
 		checkState(user, "stop");
@@ -140,25 +136,31 @@
 						endpoint.getName(), project));
 	}
 
+	@ProjectAdmin
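+	// Admin-only action: @User and @Project mark the parameters that the permission
+	// check (assumed to be driven by the @ProjectAdmin annotation) inspects.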
 	@Override
-	public void stopExploratory(UserInfo userInfo, String user, String exploratoryName) {
-		exploratoryService.stop(new UserInfo(user, userInfo.getAccessToken()), exploratoryName);
+	public void stopExploratory(@User UserInfo userInfo, String user, @Project String project, String exploratoryName) {
+		exploratoryService.stop(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stopComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName) {
-		computationalService.stopSparkCluster(new UserInfo(user, userInfo.getAccessToken()),
-				exploratoryName, computationalName);
+	public void stopComputational(@User UserInfo userInfo, String user, @Project String project, String exploratoryName,
+								  String computationalName) {
+		computationalService.stopSparkCluster(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
+				computationalName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void terminateExploratory(UserInfo userInfo, String user, String exploratoryName) {
-		exploratoryService.terminate(new UserInfo(user, userInfo.getAccessToken()), exploratoryName);
+	public void terminateExploratory(@User UserInfo userInfo, String user, @Project String project, String exploratoryName) {
+		exploratoryService.terminate(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void terminateComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName) {
-		computationalService.terminateComputational(new UserInfo(user, userInfo.getAccessToken()), exploratoryName,
+	public void terminateComputational(@User UserInfo userInfo, String user, @Project String project,
+									   String exploratoryName, String computationalName) {
+		computationalService.terminateComputational(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
 				computationalName);
 	}
 
@@ -181,12 +183,14 @@
 
 	private void stopNotebookWithServiceAccount(UserInstanceDTO instance) {
 		final UserInfo userInfo = securityService.getServiceAccountInfo(instance.getUser());
-		exploratoryService.stop(userInfo, instance.getExploratoryName());
+		exploratoryService.stop(userInfo, instance.getProject(), instance.getExploratoryName());
 	}
 
 	private List<UserResourceInfo> getProjectEnv(ProjectDTO projectDTO, List<UserInstanceDTO> allInstances) {
-		final Stream<UserResourceInfo> userResources = allInstances.stream()
-				.filter(instance -> instance.getProject().equals(projectDTO.getName())).map(this::toUserResourceInfo);
+		final Stream<UserResourceInfo> userResources = allInstances
+				.stream()
+				.filter(instance -> instance.getProject().equals(projectDTO.getName()))
+				.map(this::toUserResourceInfo);
 		if (projectDTO.getEndpoints() != null) {
 			final Stream<UserResourceInfo> edges = projectDTO.getEndpoints()
 					.stream()
@@ -194,8 +198,7 @@
 							.withResourceStatus(e.getStatus().toString())
 							.withProject(projectDTO.getName())
 							.withIp(e.getEdgeInfo() != null ? e.getEdgeInfo().getPublicIp() : null));
-			return Stream.concat(edges, userResources)
-					.collect(toList());
+			return Stream.concat(edges, userResources).collect(toList());
 		} else {
 			return userResources.collect(toList());
 		}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
index 59f9adc..6a2a615 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
@@ -61,6 +61,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
@@ -105,17 +106,17 @@
 	@BudgetLimited
 	@Override
 	public String start(UserInfo userInfo, String exploratoryName, @Project String project) {
-		return action(userInfo, exploratoryName, EXPLORATORY_START, STARTING);
+		return action(userInfo, project, exploratoryName, EXPLORATORY_START, STARTING);
 	}
 
 	@Override
-	public String stop(UserInfo userInfo, String exploratoryName) {
-		return action(userInfo, exploratoryName, EXPLORATORY_STOP, STOPPING);
+	public String stop(UserInfo userInfo, String project, String exploratoryName) {
+		return action(userInfo, project, exploratoryName, EXPLORATORY_STOP, STOPPING);
 	}
 
 	@Override
-	public String terminate(UserInfo userInfo, String exploratoryName) {
-		return action(userInfo, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
+	public String terminate(UserInfo userInfo, String project, String exploratoryName) {
+		return action(userInfo, project, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
 	}
 
 	@BudgetLimited
@@ -142,7 +143,7 @@
 			log.error("Could not update the status of exploratory environment {} with name {} for user {}",
 					exploratory.getDockerImage(), exploratory.getName(), userInfo.getName(), t);
 			if (isAdded) {
-				updateExploratoryStatusSilent(userInfo.getName(), exploratory.getName(), FAILED);
+				updateExploratoryStatusSilent(userInfo.getName(), project, exploratory.getName(), FAILED);
 			}
 			throw new DlabException("Could not create exploratory environment " + exploratory.getName() + " for user "
 					+ userInfo.getName() + ": " + Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()), t);
@@ -150,58 +151,16 @@
 	}
 
 	@Override
-	public void updateExploratoryStatuses(String user, UserInstanceStatus status) {
-		exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(user, TERMINATED, FAILED)
-				.forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, user));
-	}
-
-	@Override
 	public void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status) {
 		exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(project, endpoint, TERMINATED, FAILED)
-				.forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, ui.getUser()));
-	}
-
-	/**
-	 * Updates parameter 'reuploadKeyRequired' for corresponding user's exploratories with allowable statuses.
-	 *
-	 * @param user                user.
-	 * @param reuploadKeyRequired true/false.
-	 * @param exploratoryStatuses allowable exploratories' statuses.
-	 */
-	@Override
-	public void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
-												   UserInstanceStatus... exploratoryStatuses) {
-		exploratoryDAO.updateReuploadKeyForExploratories(user, reuploadKeyRequired, exploratoryStatuses);
-	}
-
-	/**
-	 * Returns list of user's exploratories and corresponding computational resources where both of them have
-	 * predefined statuses.
-	 *
-	 * @param user                user.
-	 * @param exploratoryStatus   status for exploratory environment.
-	 * @param computationalStatus status for computational resource affiliated with the exploratory.
-	 * @return list with user instances.
-	 */
-	@Override
-	public List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
-														  UserInstanceStatus computationalStatus) {
-		return getExploratoriesWithStatus(user, exploratoryStatus).stream()
-				.map(e -> e.withResources(computationalResourcesWithStatus(e, computationalStatus)))
-				.collect(Collectors.toList());
+				.forEach(ui -> updateExploratoryStatus(project, ui.getExploratoryName(), status, ui.getUser()));
 	}
 
 	@Override
-	public List<UserInstanceDTO> findAll() {
-		return exploratoryDAO.getInstances();
-	}
-
-	@Override
-	public void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config) {
+	public void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config) {
 		final String userName = userInfo.getName();
 		final String token = userInfo.getAccessToken();
-		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName,
-				exploratoryName);
+		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName, project, exploratoryName);
 		EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
 		final ExploratoryReconfigureSparkClusterActionDTO updateClusterConfigDTO =
 				requestBuilder.newClusterConfigUpdate(userInfo, userInstanceDTO, config, endpointDTO);
@@ -211,6 +170,7 @@
 		requestId.put(userName, uuid);
 		exploratoryDAO.updateExploratoryFields(new ExploratoryStatusDTO()
 				.withUser(userName)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withConfig(config)
 				.withStatus(UserInstanceStatus.RECONFIGURING.toString()));
@@ -220,13 +180,14 @@
 	 * Returns user instance's data by it's name.
 	 *
 	 * @param user            user.
+	 * @param project         name of project.
 	 * @param exploratoryName name of exploratory.
 	 * @return corresponding user instance's data or empty data if resource doesn't exist.
 	 */
 	@Override
-	public Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName) {
+	public Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName) {
 		try {
-			return Optional.of(exploratoryDAO.fetchExploratoryFields(user, exploratoryName));
+			return Optional.of(exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName));
 		} catch (DlabException e) {
 			log.warn("User instance with exploratory name {} for user {} not found.", exploratoryName, user);
 		}
@@ -234,8 +195,22 @@
 	}
 
 	@Override
-	public List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName) {
-		return exploratoryDAO.getClusterConfig(user.getName(), exploratoryName);
+	public List<UserInstanceDTO> findAll() {
+		return exploratoryDAO.getInstances();
+	}
+
+	@Override
+	public List<UserInstanceDTO> findAll(Set<ProjectDTO> projects) {
+		List<String> projectNames = projects
+				.stream()
+				.map(ProjectDTO::getName)
+				.collect(Collectors.toList());
+		return exploratoryDAO.fetchExploratoryFieldsForProjectWithComp(projectNames);
+	}
+
+	@Override
+	public List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName) {
+		return exploratoryDAO.getClusterConfig(user.getName(), project, exploratoryName);
 	}
 
 	@Override
@@ -261,30 +236,20 @@
 	}
 
 	/**
-	 * Returns list of user's exploratories with predefined status.
-	 *
-	 * @param user   user.
-	 * @param status status for exploratory environment.
-	 * @return list of user's instances.
-	 */
-	private List<UserInstanceDTO> getExploratoriesWithStatus(String user, UserInstanceStatus status) {
-		return exploratoryDAO.fetchUserExploratoriesWhereStatusIn(user, true, status);
-	}
-
-	/**
 	 * Sends the post request to the provisioning service and update the status of exploratory environment.
 	 *
 	 * @param userInfo        user info.
+	 * @param project         name of project
 	 * @param exploratoryName name of exploratory environment.
 	 * @param action          action for exploratory environment.
 	 * @param status          status for exploratory environment.
 	 * @return Invocation request as JSON string.
 	 */
-	private String action(UserInfo userInfo, String exploratoryName, String action, UserInstanceStatus status) {
+	private String action(UserInfo userInfo, String project, String exploratoryName, String action, UserInstanceStatus status) {
 		try {
-			updateExploratoryStatus(exploratoryName, status, userInfo.getName());
+			updateExploratoryStatus(project, exploratoryName, status, userInfo.getName());
 
-			UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), exploratoryName);
+			UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, exploratoryName);
 			EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 			final String uuid =
 					provisioningService.post(endpointDTO.getUrl() + action, userInfo.getAccessToken(),
@@ -294,7 +259,7 @@
 		} catch (Exception t) {
 			log.error("Could not {} exploratory environment {} for user {}",
 					StringUtils.substringAfter(action, "/"), exploratoryName, userInfo.getName(), t);
-			updateExploratoryStatusSilent(userInfo.getName(), exploratoryName, FAILED);
+			updateExploratoryStatusSilent(userInfo.getName(), project, exploratoryName, FAILED);
 			final String errorMsg = String.format("Could not %s exploratory environment %s: %s",
 					StringUtils.substringAfter(action, "/"), exploratoryName,
 					Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()));
@@ -302,15 +267,15 @@
 		}
 	}
 
-	private void updateExploratoryStatus(String exploratoryName, UserInstanceStatus status, String user) {
-		updateExploratoryStatus(user, exploratoryName, status);
+	private void updateExploratoryStatus(String project, String exploratoryName, UserInstanceStatus status, String user) {
+		updateExploratoryStatus(user, project, exploratoryName, status);
 
 		if (status == STOPPING) {
-			updateComputationalStatuses(user, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
+			updateComputationalStatuses(user, project, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
 		} else if (status == TERMINATING) {
-			updateComputationalStatuses(user, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
+			updateComputationalStatuses(user, project, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
 		} else if (status == TERMINATED) {
-			updateComputationalStatuses(user, exploratoryName, TERMINATED, TERMINATED, TERMINATED, FAILED);
+			updateComputationalStatuses(user, project, exploratoryName, TERMINATED, TERMINATED, TERMINATED, FAILED);
 		}
 	}
 
@@ -332,11 +297,12 @@
 	 * Updates the status of exploratory environment.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateExploratoryStatus(String user, String exploratoryName, UserInstanceStatus status) {
-		StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, exploratoryName, status);
+	private void updateExploratoryStatus(String user, String project, String exploratoryName, UserInstanceStatus status) {
+		StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, project, exploratoryName, status);
 		exploratoryDAO.updateExploratoryStatus(exploratoryStatus);
 	}
 
@@ -344,36 +310,39 @@
 	 * Updates the status of exploratory environment without exceptions. If exception occurred then logging it.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private void updateExploratoryStatusSilent(String user, String exploratoryName, UserInstanceStatus status) {
+	private void updateExploratoryStatusSilent(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		try {
-			updateExploratoryStatus(user, exploratoryName, status);
+			updateExploratoryStatus(user, project, exploratoryName, status);
 		} catch (DlabException e) {
 			log.error("Could not update the status of exploratory environment {} for user {} to {}",
 					exploratoryName, user, status, e);
 		}
 	}
 
-	private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+	private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
 			dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
 		log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
 				"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
-		computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
-				dataEngineServiceStatus, excludedStatuses);
+		computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+				dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
 	}
 
 	/**
 	 * Instantiates and returns the descriptor of exploratory environment status.
 	 *
 	 * @param user            user name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory environment.
 	 * @param status          status for exploratory environment.
 	 */
-	private StatusEnvBaseDTO<?> createStatusDTO(String user, String exploratoryName, UserInstanceStatus status) {
+	private StatusEnvBaseDTO<?> createStatusDTO(String user, String project, String exploratoryName, UserInstanceStatus status) {
 		return new ExploratoryStatusDTO()
 				.withUser(user)
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withStatus(status);
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
index bd00f38..85ce534 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
@@ -73,15 +73,15 @@
 	private EndpointService endpointService;
 
 	@Override
-	public String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription) {
+	public String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription) {
 
-		UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), exploratoryName);
+		UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), project, exploratoryName);
 
 		if (imageExploratoryDao.exist(imageName, userInstance.getProject())) {
 			log.error(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
 			throw new ResourceAlreadyExistException(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
 		}
-		final List<Library> libraries = libDAO.getLibraries(user.getName(), exploratoryName);
+		final List<Library> libraries = libDAO.getLibraries(user.getName(), project, exploratoryName);
 
 		imageExploratoryDao.save(Image.builder()
 				.name(imageName)
@@ -98,6 +98,7 @@
 
 		exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
 				.withUser(user.getName())
+				.withProject(project)
 				.withExploratoryName(exploratoryName)
 				.withStatus(UserInstanceStatus.CREATING_IMAGE));
 
@@ -113,13 +114,14 @@
 				exploratoryName, image.getUser());
 		exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
 				.withUser(image.getUser())
+				.withProject(image.getProject())
 				.withExploratoryName(exploratoryName)
 				.withStatus(UserInstanceStatus.RUNNING));
 		imageExploratoryDao.updateImageFields(image);
 		if (newNotebookIp != null) {
 			log.debug("Changing exploratory ip with name {} for user {} to {}", exploratoryName, image.getUser(),
 					newNotebookIp);
-			exploratoryDAO.updateExploratoryIp(image.getUser(), newNotebookIp, exploratoryName);
+			exploratoryDAO.updateExploratoryIp(image.getUser(), image.getProject(), newNotebookIp, exploratoryName);
 		}
 
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
index 1d3230f..dd370dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
@@ -75,9 +75,9 @@
 	}
 
 	@Override
-	public void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
+	public void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
 												   String computationalName, LocalDateTime lastActivity) {
-		computationalDAO.updateLastActivity(userInfo.getName(), exploratoryName, computationalName, lastActivity);
+		computationalDAO.updateLastActivity(userInfo.getName(), project, exploratoryName, computationalName, lastActivity);
 	}
 
 	private void updateLastActivity(UserInstanceDTO ui) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java
index eaf7071..6d46d71 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InfrastructureInfoServiceImpl.java
@@ -26,9 +26,11 @@
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.EndpointDTO;
 import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.resources.dto.HealthStatusEnum;
 import com.epam.dlab.backendapi.resources.dto.HealthStatusPageDTO;
 import com.epam.dlab.backendapi.resources.dto.ProjectInfrastructureInfo;
-import com.epam.dlab.backendapi.service.BillingServiceNew;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.BillingService;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.InfrastructureInfoService;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -44,7 +46,9 @@
 import lombok.extern.slf4j.Slf4j;
 import org.bson.Document;
 
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -63,19 +67,19 @@
 	private final BillingDAO billingDAO;
 	private final ProjectService projectService;
 	private final EndpointService endpointService;
-	private final BillingServiceNew billingServiceNew;
+	private final BillingService billingService;
 
 	@Inject
 	public InfrastructureInfoServiceImpl(ExploratoryDAO expDAO, EnvDAO envDAO, SelfServiceApplicationConfiguration configuration,
 										 BillingDAO billingDAO, ProjectService projectService, EndpointService endpointService,
-										 BillingServiceNew billingServiceNew) {
+										 BillingService billingService) {
 		this.expDAO = expDAO;
 		this.envDAO = envDAO;
 		this.configuration = configuration;
 		this.billingDAO = billingDAO;
 		this.projectService = projectService;
 		this.endpointService = endpointService;
-		this.billingServiceNew = billingServiceNew;
+		this.billingService = billingService;
 	}
 
 	@Override
@@ -98,8 +102,16 @@
 
 						List<BillingData> collect = e.getValue()
 								.stream()
-								.map(exp -> billingServiceNew.getExploratoryRemoteBillingData(user, (String) exp.get("endpoint"),
-										expDAO.findExploratories(e.getKey(), (String) exp.get("endpoint"), user.getName())))
+								.map(exp -> {
+									List<BillingData> exploratoryRemoteBillingData = new ArrayList<>();
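+									// Billing here is best-effort: on failure the error is logged and an empty
+									// list is returned so the rest of the page can still be assembled.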
+									try {
+										exploratoryRemoteBillingData = billingService.getExploratoryRemoteBillingData(user, (String) exp.get("endpoint"),
+												expDAO.findExploratories(e.getKey(), (String) exp.get("endpoint"), user.getName()));
+									} catch (Exception ex) {
+										log.error("Cannot retrieve billing information", ex);
+									}
+									return exploratoryRemoteBillingData;
+								})
 								.flatMap(Collection::stream)
 								.collect(Collectors.toList());
 
@@ -118,17 +130,20 @@
 	}
 
 	@Override
-	public HealthStatusPageDTO getHeathStatus(UserInfo userInfo, boolean fullReport, boolean isAdmin) {
+	public HealthStatusPageDTO getHeathStatus(UserInfo userInfo, boolean fullReport) {
 		final String user = userInfo.getName();
 		log.debug("Request the status of resources for user {}, report type {}", user, fullReport);
 		try {
-
-			return envDAO.getHealthStatusPageDTO(user, fullReport)
-					.withBillingEnabled(configuration.isBillingSchedulerEnabled())
-					.withAdmin(isAdmin)
-					.withProjectAssinged(projectService.isAnyProjectAssigned(userInfo))
-					.withBillingQuoteUsed(billingDAO.getBillingQuoteUsed())
-					.withBillingUserQuoteUsed(billingDAO.getBillingUserQuoteUsed(user));
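+			// The health page is now assembled statically (status OK, empty resource list);
+			// billing quotas and admin/project flags are still resolved per user.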
+			return HealthStatusPageDTO.builder()
+					.status(HealthStatusEnum.OK.toString())
+					.listResources(Collections.emptyList())
+					.billingEnabled(configuration.isBillingSchedulerEnabled())
+					.projectAdmin(UserRoles.isProjectAdmin(userInfo))
+					.admin(UserRoles.isAdmin(userInfo))
+					.projectAssigned(projectService.isAnyProjectAssigned(userInfo))
+					.billingQuoteUsed(billingDAO.getBillingQuoteUsed())
+					.billingUserQuoteUsed(billingDAO.getBillingUserQuoteUsed(user))
+					.build();
 		} catch (Exception e) {
 			log.warn("Could not return status of resources for user {}: {}", user, e.getLocalizedMessage(), e);
 			throw new DlabException(e.getMessage(), e);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
index 3d65a15..3fbb170 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
@@ -50,7 +50,13 @@
 import org.apache.commons.lang3.StringUtils;
 import org.bson.Document;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeMap;
 import java.util.stream.Collectors;
 
 @Slf4j
@@ -80,13 +86,13 @@
 
 	@Override
 	@SuppressWarnings("unchecked")
-	public List<Document> getLibs(String user, String exploratoryName, String computationalName) {
+	public List<Document> getLibs(String user, String project, String exploratoryName, String computationalName) {
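+		// Without a computational name return the notebook-level libraries, otherwise
+		// the libraries of the given computational resource.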
 		if (StringUtils.isEmpty(computationalName)) {
-			return (List<Document>) libraryDAO.findExploratoryLibraries(user, exploratoryName)
+			return (List<Document>) libraryDAO.findExploratoryLibraries(user, project, exploratoryName)
 					.getOrDefault(ExploratoryLibDAO.EXPLORATORY_LIBS, new ArrayList<>());
 		} else {
-			Document document = (Document) libraryDAO.findComputationalLibraries(user, exploratoryName,
-					computationalName)
+			Document document = (Document) libraryDAO.findComputationalLibraries(user, project,
+					exploratoryName, computationalName)
 					.getOrDefault(ExploratoryLibDAO.COMPUTATIONAL_LIBS, new Document());
 
 			return (List<Document>) document.getOrDefault(computationalName, new ArrayList<>());
@@ -95,8 +101,8 @@
 
 	@Override
 	@SuppressWarnings("unchecked")
-	public List<LibInfoRecord> getLibInfo(String user, String exploratoryName) {
-		Document document = libraryDAO.findAllLibraries(user, exploratoryName);
+	public List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName) {
+		Document document = libraryDAO.findAllLibraries(user, project, exploratoryName);
 
 		Map<LibKey, List<LibraryStatus>> model = new TreeMap<>(Comparator.comparing(LibKey::getName)
 				.thenComparing(LibKey::getVersion)
@@ -124,52 +130,52 @@
 	}
 
 	@Override
-	public String installComputationalLibs(UserInfo ui, String expName, String compName,
+	public String installComputationalLibs(UserInfo ui, String project, String expName, String compName,
 										   List<LibInstallDTO> libs) {
 
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), expName, compName);
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), project, expName, compName);
 		EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 		final String uuid =
 				provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_INSTALL,
 						ui.getAccessToken(),
-						toComputationalLibraryInstallDto(ui, expName, compName, libs, userInstance, endpointDTO),
+						toComputationalLibraryInstallDto(ui, project, expName, compName, libs, userInstance, endpointDTO),
 						String.class);
 		requestId.put(ui.getName(), uuid);
 		return uuid;
 	}
 
 	@Override
-	public String installExploratoryLibs(UserInfo ui, String expName, List<LibInstallDTO> libs) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), expName);
+	public String installExploratoryLibs(UserInfo ui, String project, String expName, List<LibInstallDTO> libs) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), project, expName);
 		EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
 		final String uuid =
 				provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_LIB_INSTALL,
-						ui.getAccessToken(), toExploratoryLibraryInstallDto(ui, expName, libs, userInstance, endpointDTO),
+						ui.getAccessToken(), toExploratoryLibraryInstallDto(ui, project, expName, libs, userInstance, endpointDTO),
 						String.class);
 		requestId.put(ui.getName(), uuid);
 		return uuid;
 	}
 
-	private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String exploratoryName,
+	private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String project, String exploratoryName,
 															 List<LibInstallDTO> libs, UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 		final List<LibInstallDTO> libsToInstall = libs.stream()
-				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), exploratoryName,
+				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project, exploratoryName,
 						lib.getGroup(), lib.getName())))
-				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), exploratoryName, l, l.isOverride()))
+				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, exploratoryName, l, l.isOverride()))
 				.collect(Collectors.toList());
 		return requestBuilder.newLibInstall(userInfo, userInstance, endpointDTO, libsToInstall);
 	}
 
-	private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String expName, String compName,
-															   List<LibInstallDTO> libs,
+	private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String project, String expName,
+															   String compName, List<LibInstallDTO> libs,
 															   UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
 
 		final UserComputationalResource computationalResource = getComputationalResource(compName, userInstance);
 		final List<LibInstallDTO> libsToInstall = libs.stream()
-				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), expName,
-						compName, lib.getGroup(), lib.getName())))
-				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), expName, compName, l,
-						l.isOverride()))
+				.map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project,
+						expName, compName, lib.getGroup(), lib.getName())))
+				.peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, expName, compName,
+						l, l.isOverride()))
 				.collect(Collectors.toList());
 		return requestBuilder.newLibInstall(userInfo, userInstance, computationalResource, libsToInstall, endpointDTO);
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
index 5355dee..f33f9e2 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
@@ -3,10 +3,17 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.annotation.BudgetLimited;
 import com.epam.dlab.backendapi.annotation.Project;
+import com.epam.dlab.backendapi.annotation.ProjectAdmin;
+import com.epam.dlab.backendapi.annotation.User;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.UserGroupDao;
-import com.epam.dlab.backendapi.domain.*;
+import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.domain.ProjectEndpointDTO;
+import com.epam.dlab.backendapi.domain.RequestId;
+import com.epam.dlab.backendapi.domain.UpdateProjectDTO;
+import com.epam.dlab.backendapi.roles.UserRoles;
 import com.epam.dlab.backendapi.service.EndpointService;
 import com.epam.dlab.backendapi.service.ExploratoryService;
 import com.epam.dlab.backendapi.service.ProjectService;
@@ -21,7 +28,10 @@
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
@@ -71,9 +81,10 @@
 	}
 
 	@Override
-	public List<ProjectManagingDTO> getProjectsForManaging() {
-		return projectDAO.getProjects().stream().map(p -> new ProjectManagingDTO(
-				p.getName(), p.getBudget(), isCanBeStopped(p), isCanBeTerminated(p)))
+	public List<ProjectDTO> getProjects(UserInfo user) {
+		return projectDAO.getProjects()
+				.stream()
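+				// System admins see every project; project admins only those owned by their groups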
+				.filter(project -> UserRoles.isProjectAdmin(user, project.getGroups()) || UserRoles.isAdmin(user))
 				.collect(Collectors.toList());
 	}
 
@@ -83,11 +94,6 @@
 	}
 
 	@Override
-	public List<ProjectDTO> getProjectsWithStatus(ProjectDTO.Status status) {
-		return projectDAO.getProjectsWithStatus(status);
-	}
-
-	@Override
 	public List<ProjectDTO> getProjectsByEndpoint(String endpointName) {
 		return projectDAO.getProjectsByEndpoint(endpointName);
 	}
@@ -116,21 +122,13 @@
 		exploratoryService.updateProjectExploratoryStatuses(name, endpoint, UserInstanceStatus.TERMINATING);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void terminateEndpoint(UserInfo userInfo, List<String> endpoints, String name) {
+	public void terminateEndpoint(@User UserInfo userInfo, List<String> endpoints, @Project String name) {
 		endpoints.forEach(endpoint -> terminateEndpoint(userInfo, endpoint, name));
 	}
 
-	@Override
-	public void terminateProject(UserInfo userInfo, String name) {
-		List<ProjectEndpointDTO> endpoints = get(name).getEndpoints();
-		checkProjectRelatedResourcesInProgress(name, endpoints, TERMINATE_ACTION);
-
-		endpoints.stream()
-				.map(ProjectEndpointDTO::getName)
-				.forEach(endpoint -> terminateEndpoint(userInfo, endpoint, name));
-	}
-
 	@BudgetLimited
 	@Override
 	public void start(UserInfo userInfo, String endpoint, @Project String name) {
@@ -138,8 +136,9 @@
 		projectDAO.updateEdgeStatus(name, endpoint, UserInstanceStatus.STARTING);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void start(UserInfo userInfo, List<String> endpoints, String name) {
+	public void start(@User UserInfo userInfo, List<String> endpoints, @Project String name) {
 		endpoints.forEach(endpoint -> start(userInfo, endpoint, name));
 	}
 
@@ -149,26 +148,27 @@
 		projectDAO.updateEdgeStatus(name, endpoint, UserInstanceStatus.STOPPING);
 	}
 
+	@ProjectAdmin
 	@Override
-	public void stop(UserInfo userInfo, List<String> endpoints, String name) {
-		endpoints.forEach(endpoint -> stop(userInfo, endpoint, name));
-	}
-
-	@Override
-	public void stopWithResources(UserInfo userInfo, String projectName) {
-		List<ProjectEndpointDTO> endpoints = get(projectName).getEndpoints();
-		checkProjectRelatedResourcesInProgress(projectName, endpoints, STOP_ACTION);
+	public void stopWithResources(@User UserInfo userInfo, List<String> endpoints, @Project String projectName) {
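+		// Stop only the endpoints explicitly requested for this project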
+		List<ProjectEndpointDTO> endpointDTOs = get(projectName)
+				.getEndpoints()
+				.stream()
+				.filter(projectEndpointDTO -> endpoints.contains(projectEndpointDTO.getName()))
+				.collect(Collectors.toList());
+		checkProjectRelatedResourcesInProgress(projectName, endpointDTOs, STOP_ACTION);
 
 		exploratoryDAO.fetchRunningExploratoryFieldsForProject(projectName).forEach(e ->
-				exploratoryService.stop(new UserInfo(e.getUser(), userInfo.getAccessToken()), e.getExploratoryName()));
+				exploratoryService.stop(new UserInfo(e.getUser(), userInfo.getAccessToken()), projectName, e.getExploratoryName()));
 
-		endpoints.stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.TERMINATING, UserInstanceStatus.STOPPED).contains(e.getStatus()))
+		endpointDTOs.stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
+				UserInstanceStatus.TERMINATING, UserInstanceStatus.STOPPED, UserInstanceStatus.FAILED).contains(e.getStatus()))
 				.forEach(e -> stop(userInfo, e.getName(), projectName));
 	}
 
+	@ProjectAdmin
 	@Override
-	public void update(UserInfo userInfo, UpdateProjectDTO projectDTO) {
+	public void update(@User UserInfo userInfo, UpdateProjectDTO projectDTO, @Project String projectName) {
 		final ProjectDTO project = projectDAO.get(projectDTO.getName()).orElseThrow(projectNotFound());
 		final Set<String> endpoints = project.getEndpoints()
 				.stream()
@@ -186,11 +186,6 @@
 	}
 
 	@Override
-	public void updateBudget(String project, Integer budget) {
-		projectDAO.updateBudget(project, budget);
-	}
-
-	@Override
 	public void updateBudget(List<ProjectDTO> projects) {
 		projects.forEach(p -> projectDAO.updateBudget(p.getName(), p.getBudget()));
 	}
@@ -255,20 +250,6 @@
 		}
 	}
 
-	private boolean isCanBeStopped(ProjectDTO projectDTO) {
-        List<ProjectEndpointDTO> endpoints = projectDTO.getEndpoints();
-        return !endpoints.stream().allMatch(e -> exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(
-                projectDTO.getName(), e.getName(), UserInstanceStatus.STOPPED, UserInstanceStatus.TERMINATED,
-                UserInstanceStatus.TERMINATING).isEmpty()) ||
-				endpoints.stream().anyMatch(e -> Arrays.asList(UserInstanceStatus.RUNNING, UserInstanceStatus.STARTING)
-						.contains(e.getStatus()));
-	}
-
-	private boolean isCanBeTerminated(ProjectDTO projectDTO) {
-        return !projectDTO.getEndpoints().stream().allMatch(e -> Objects.equals(UserInstanceStatus.TERMINATED,
-                e.getStatus()));
-	}
-
 	private Supplier<ResourceNotFoundException> projectNotFound() {
 		return () -> new ResourceNotFoundException("Project with passed name not found");
 	}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
index 0efe7f6..e75d9df 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
@@ -19,7 +19,6 @@
 
 package com.epam.dlab.backendapi.service.impl;
 
-import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ComputationalDAO;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
@@ -27,11 +26,8 @@
 import com.epam.dlab.backendapi.service.ReuploadKeyService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
 import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.rest.client.RESTService;
@@ -39,16 +35,9 @@
 import com.google.inject.Singleton;
 import com.google.inject.name.Named;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.UUID;
 
 import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
 import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static com.epam.dlab.rest.contracts.KeyAPI.REUPLOAD_KEY;
 
 @Singleton
 @Slf4j
@@ -75,34 +64,33 @@
 
 	@Override
 	public void updateResourceData(ReuploadKeyStatusDTO dto) {
-		String user = dto.getUser();
-		ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
-		log.debug("Updating resource {} to status RUNNING...", resource.toString());
-		updateResourceStatus(user, resource, RUNNING);
-		if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
-			log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
-			updateResourceReuploadKeyFlag(user, resource, false);
-		} else {
-			log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
-		}
-	}
+		String user = dto.getUser();
+		ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
+		log.debug("Updating resource {} to status RUNNING...", resource.toString());
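+		// NOTE: ReuploadKeyStatusDTO carries no project, so null is passed through
+		// (assumption: the project-aware DAO methods tolerate a null project here).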
+		updateResourceStatus(user, null, resource, RUNNING);
+		if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
+			log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
+			updateResourceReuploadKeyFlag(user, null, resource, false);
+		} else {
+			log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
+		}
+	}
 
-	private void updateResourceStatus(String user, ResourceData resourceData, UserInstanceStatus newStatus) {
-		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
-			exploratoryDAO.updateStatusForExploratory(user, resourceData.getExploratoryName(), newStatus);
-		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
-			computationalDAO.updateStatusForComputationalResource(user, resourceData.getExploratoryName(),
-					resourceData.getComputationalName(), newStatus);
-		}
-	}
+	private void updateResourceStatus(String user, String project, ResourceData resourceData, UserInstanceStatus newStatus) {
+		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+			exploratoryDAO.updateStatusForExploratory(user, project, resourceData.getExploratoryName(), newStatus);
+		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+			computationalDAO.updateStatusForComputationalResource(user, project,
+					resourceData.getExploratoryName(), resourceData.getComputationalName(), newStatus);
+		}
+	}
 
-	private void updateResourceReuploadKeyFlag(String user, ResourceData resourceData, boolean reuploadKeyRequired) {
-		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
-			exploratoryDAO.updateReuploadKeyForExploratory(user, resourceData.getExploratoryName(),
-					reuploadKeyRequired);
-		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
-			computationalDAO.updateReuploadKeyFlagForComputationalResource(user, resourceData.getExploratoryName(),
-					resourceData.getComputationalName(), reuploadKeyRequired);
-		}
-	}
+	private void updateResourceReuploadKeyFlag(String user, String project, ResourceData resourceData, boolean reuploadKeyRequired) {
+		if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+			exploratoryDAO.updateReuploadKeyForExploratory(user, project, resourceData.getExploratoryName(), reuploadKeyRequired);
+		} else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+			computationalDAO.updateReuploadKeyFlagForComputationalResource(user, project,
+					resourceData.getExploratoryName(), resourceData.getComputationalName(), reuploadKeyRequired);
+		}
+	}
 }
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
index e84908e..cb7b1c1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
@@ -44,7 +44,12 @@
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
 
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
 import java.time.temporal.ChronoUnit;
 import java.util.Date;
 import java.util.List;
@@ -55,7 +60,13 @@
 import java.util.stream.Stream;
 
 import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
 import static com.epam.dlab.dto.base.DataEngineType.getDockerImageName;
 import static java.time.ZoneId.systemDefault;
 import static java.util.Collections.singletonList;
@@ -98,44 +109,44 @@
 	private RESTService provisioningService;
 
 	@Override
-	public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName) {
-		return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, exploratoryName)
+	public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName) {
+		return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, project, exploratoryName)
 				.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
 						exploratoryName)));
 	}
 
 	@Override
-	public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
+	public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
 																	 String computationalName) {
-		return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, exploratoryName, computationalName)
+		return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, project, exploratoryName, computationalName)
 				.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
 						exploratoryName) + " with computational resource " + computationalName));
 	}
 
 	@Override
-	public void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto) {
-		validateExploratoryStatus(user, exploratoryName);
+	public void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+		validateExploratoryStatus(user, project, exploratoryName);
 		populateDefaultSchedulerValues(dto);
 		log.debug("Updating exploratory {} for user {} with new scheduler job data: {}...", exploratoryName, user,
 				dto);
-		exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, exploratoryName, dto);
+		exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, project, exploratoryName, dto);
 
 		if (!dto.inactivityScheduler() && dto.isSyncStartRequired()) {
-			shareSchedulerJobDataToSparkClusters(user, exploratoryName, dto);
+			shareSchedulerJobDataToSparkClusters(user, project, exploratoryName, dto);
 		} else if (!dto.inactivityScheduler()) {
-			computationalDAO.updateSchedulerSyncFlag(user, exploratoryName, dto.isSyncStartRequired());
+			computationalDAO.updateSchedulerSyncFlag(user, project, exploratoryName, dto.isSyncStartRequired());
 		}
 	}
 
 	@Override
-	public void updateComputationalSchedulerData(String user, String exploratoryName, String computationalName,
+	public void updateComputationalSchedulerData(String user, String project, String exploratoryName, String computationalName,
 												 SchedulerJobDTO dto) {
-		validateExploratoryStatus(user, exploratoryName);
-		validateComputationalStatus(user, exploratoryName, computationalName);
+		validateExploratoryStatus(user, project, exploratoryName);
+		validateComputationalStatus(user, project, exploratoryName, computationalName);
 		populateDefaultSchedulerValues(dto);
 		log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new scheduler " +
 				"job data {}...", computationalName, exploratoryName, user, dto);
-		computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, computationalName, dto);
+		computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName, computationalName, dto);
 	}
 
 	@Override
@@ -203,11 +214,12 @@
 	}
 
 	private void stopComputational(SchedulerJobData job) {
+		final String project = job.getProject();
 		final String expName = job.getExploratoryName();
 		final String compName = job.getComputationalName();
 		final String user = job.getUser();
 		log.debug("Stopping exploratory {} computational {} for user {} by scheduler", expName, compName, user);
-		computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), expName, compName);
+		computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), project, expName, compName);
 	}
 
 	private void terminateComputational(SchedulerJobData job) {
@@ -216,14 +228,15 @@
 		final String compName = job.getComputationalName();
 		final UserInfo userInfo = securityService.getServiceAccountInfo(user);
 		log.debug("Terminating exploratory {} computational {} for user {} by scheduler", expName, compName, user);
-		computationalService.terminateComputational(userInfo, expName, compName);
+		computationalService.terminateComputational(userInfo, job.getProject(), expName, compName);
 	}
 
 	private void stopExploratory(SchedulerJobData job) {
 		final String expName = job.getExploratoryName();
 		final String user = job.getUser();
+		final String project = job.getProject();
 		log.debug("Stopping exploratory {} for user {} by scheduler", expName, user);
-		exploratoryService.stop(securityService.getServiceAccountInfo(user), expName);
+		exploratoryService.stop(securityService.getServiceAccountInfo(user), project, expName);
 	}
 
 	private List<SchedulerJobData> getExploratorySchedulersForTerminating(OffsetDateTime now) {
@@ -250,7 +263,7 @@
 			log.trace("Starting computational for exploratory {} for user {} by scheduler", exploratoryName, user);
 			final DataEngineType sparkCluster = DataEngineType.SPARK_STANDALONE;
 			final List<UserComputationalResource> compToBeStarted =
-					computationalDAO.findComputationalResourcesWithStatus(user, exploratoryName, STOPPED);
+					computationalDAO.findComputationalResourcesWithStatus(user, project, exploratoryName, STOPPED);
 
 			compToBeStarted
 					.stream()
@@ -261,9 +274,10 @@
 
 	private void terminateExploratory(SchedulerJobData job) {
 		final String user = job.getUser();
+		final String project = job.getProject();
 		final String expName = job.getExploratoryName();
 		log.debug("Terminating exploratory {} for user {} by scheduler", expName, user);
-		exploratoryService.terminate(securityService.getUserInfoOffline(user), expName);
+		exploratoryService.terminate(securityService.getUserInfoOffline(user), project, expName);
 	}
 
 	private void startSpark(String user, String expName, String compName, String project) {
@@ -282,19 +296,20 @@
 	 * performed automatically when the notebook is stopped, since Spark resources support this feature).
 	 *
 	 * @param user            user's name
+	 * @param project         project name
 	 * @param exploratoryName name of exploratory resource
 	 * @param dto             scheduler job data.
 	 */
-	private void shareSchedulerJobDataToSparkClusters(String user, String exploratoryName, SchedulerJobDTO dto) {
-		List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user,
-				singletonList(DataEngineType.SPARK_STANDALONE), exploratoryName,
-				STARTING, RUNNING, STOPPING, STOPPED);
+	private void shareSchedulerJobDataToSparkClusters(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+		List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user, project,
+				singletonList(DataEngineType.SPARK_STANDALONE),
+				exploratoryName, STARTING, RUNNING, STOPPING, STOPPED);
 		SchedulerJobDTO dtoWithoutStopData = getSchedulerJobWithoutStopData(dto);
 		for (String sparkName : correspondingSparkClusters) {
 			log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new " +
 					"scheduler job data {}...", sparkName, exploratoryName, user, dtoWithoutStopData);
-			computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, sparkName,
-					dtoWithoutStopData);
+			computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName,
+					sparkName, dtoWithoutStopData);
 		}
 	}
 
@@ -367,10 +382,11 @@
 	}
 
 	private boolean computationalInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+		final String projectName = schedulerJobData.getProject();
 		final String explName = schedulerJobData.getExploratoryName();
 		final String compName = schedulerJobData.getComputationalName();
 		final String user = schedulerJobData.getUser();
-		final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, explName, compName);
+		final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, projectName, explName, compName);
 		final Long maxInactivity = schedulerData.getMaxInactivity();
 		return inactivityCondition(maxInactivity, c.getStatus(), c.getLastActivity());
 	}
@@ -381,9 +397,10 @@
 	}
 
 	private boolean exploratoryInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+		final String project = schedulerJobData.getProject();
 		final String expName = schedulerJobData.getExploratoryName();
 		final String user = schedulerJobData.getUser();
-		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, expName, true);
+		final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, project, expName, true);
 		final boolean canBeStopped = userInstanceDTO.getResources()
 				.stream()
 				.map(UserComputationalResource::getStatus)
@@ -409,14 +426,14 @@
 		}
 	}
 
-	private void validateExploratoryStatus(String user, String exploratoryName) {
-		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, exploratoryName);
+	private void validateExploratoryStatus(String user, String project, String exploratoryName) {
+		final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName);
 		validateResourceStatus(userInstance.getStatus());
 	}
 
-	private void validateComputationalStatus(String user, String exploratoryName, String computationalName) {
+	private void validateComputationalStatus(String user, String project, String exploratoryName, String computationalName) {
 		final UserComputationalResource computationalResource =
-				computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName);
+				computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName);
 		final String computationalStatus = computationalResource.getStatus();
 		validateResourceStatus(computationalStatus);
 	}
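
A minimal usage sketch (not part of the patch) showing how the project-scoped scheduler API above is meant to be called; the interface name SchedulerJobService, the wiring, and the literal user/project/notebook values are assumptions for illustration only:

    // Hypothetical caller of the new project-aware signatures from this diff.
    public class SchedulerUsageSketch {
        private final SchedulerJobService schedulerJobService; // injected, as elsewhere in this module

        SchedulerUsageSketch(SchedulerJobService schedulerJobService) {
            this.schedulerJobService = schedulerJobService;
        }

        void reschedule() {
            // "user1", "projectA" and "notebook1" are placeholder values.
            SchedulerJobDTO dto = schedulerJobService.fetchSchedulerJobForUserAndExploratory(
                    "user1", "projectA", "notebook1");
            schedulerJobService.updateExploratorySchedulerData("user1", "projectA", "notebook1", dto);
        }
    }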
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
index 0a71587..1758a8b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImpl.java
@@ -18,73 +18,66 @@
  */
 package com.epam.dlab.backendapi.service.impl;
 
+import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.dao.UserGroupDao;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
+import com.epam.dlab.backendapi.roles.UserRoles;
+import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import com.epam.dlab.dto.UserInstanceStatus;
+import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import lombok.extern.slf4j.Slf4j;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 @Singleton
 @Slf4j
 public class UserGroupServiceImpl implements UserGroupService {
-
 	private static final String ROLE_NOT_FOUND_MSG = "None of the roles %s were found";
+
 	@Inject
 	private UserGroupDao userGroupDao;
 	@Inject
 	private UserRoleDao userRoleDao;
 	@Inject
 	private ProjectDAO projectDAO;
+	@Inject
+	private ProjectService projectService;
 
 	@Override
 	public void createGroup(String group, Set<String> roleIds, Set<String> users) {
 		checkAnyRoleFound(roleIds, userRoleDao.addGroupToRole(Collections.singleton(group), roleIds));
-		if (!users.isEmpty()) {
-			log.debug("Adding users {} to group {}", users, group);
-			userGroupDao.addUsers(group, users);
-		}
-	}
-
-	@Override
-	public void updateGroup(String group, Set<String> roleIds, Set<String> users) {
-		log.debug("Updating users for group {}: {}", group, users);
-		userGroupDao.updateUsers(group, users);
-		log.debug("Removing group {} from existing roles", group);
-		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
-		log.debug("Adding group {} to roles {}", group, roleIds);
-		userRoleDao.addGroupToRole(Collections.singleton(group), roleIds);
-	}
-
-	@Override
-	public void addUsersToGroup(String group, Set<String> users) {
+		log.debug("Adding users {} to group {}", users, group);
 		userGroupDao.addUsers(group, users);
 	}
 
 	@Override
-	public void updateRolesForGroup(String group, Set<String> roleIds) {
-		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
-		checkAnyRoleFound(roleIds, userRoleDao.addGroupToRole(Collections.singleton(group), roleIds));
-	}
-
-	@Override
-	public void removeUserFromGroup(String group, String user) {
-		userGroupDao.removeUser(group, user);
-	}
-
-	@Override
-	public void removeGroupFromRole(Set<String> groups, Set<String> roleIds) {
-		checkAnyRoleFound(roleIds, userRoleDao.removeGroupFromRole(groups, roleIds));
+	public void updateGroup(UserInfo user, String group, Set<String> roleIds, Set<String> users) {
+		if (UserRoles.isAdmin(user)) {
+			updateGroup(group, roleIds, users);
+		} else if (UserRoles.isProjectAdmin(user)) {
+			projectService.getProjects(user)
+					.stream()
+					.map(ProjectDTO::getGroups)
+					.flatMap(Collection::stream)
+					.filter(g -> g.equalsIgnoreCase(group))
+					.findAny()
+					.orElseThrow(() -> new DlabException(String.format("User %s doesn't have the required permission", user.getName())));
+			updateGroup(group, roleIds, users);
+		} else {
+			throw new DlabException(String.format("User %s doesn't have the required permission", user.getName()));
+		}
 	}
 
 	@Override
@@ -102,8 +95,31 @@
 	}
 
 	@Override
-	public List<UserGroupDto> getAggregatedRolesByGroup() {
-		return userRoleDao.aggregateRolesByGroup();
+	public List<UserGroupDto> getAggregatedRolesByGroup(UserInfo user) {
+		if (UserRoles.isAdmin(user)) {
+			return userRoleDao.aggregateRolesByGroup(true);
+		} else if (UserRoles.isProjectAdmin(user)) {
+			Set<String> groups = projectService.getProjects(user)
+					.stream()
+					.map(ProjectDTO::getGroups)
+					.flatMap(Collection::stream)
+					.collect(Collectors.toSet());
+			return userRoleDao.aggregateRolesByGroup(false)
+					.stream()
+					.filter(userGroup -> groups.contains(userGroup.getGroup()))
+					.collect(Collectors.toList());
+		} else {
+			throw new DlabException(String.format("User %s doesn't have the required permission", user.getName()));
+		}
+	}
+
+	private void updateGroup(String group, Set<String> roleIds, Set<String> users) {
+		log.debug("Updating users for group {}: {}", group, users);
+		userGroupDao.updateUsers(group, users);
+		log.debug("Removing group {} from existing roles", group);
+		userRoleDao.removeGroupWhenRoleNotIn(group, roleIds);
+		log.debug("Adding group {} to roles {}", group, roleIds);
+		userRoleDao.addGroupToRole(Collections.singleton(group), roleIds);
 	}
 
 	private void checkAnyRoleFound(Set<String> roleIds, boolean anyRoleFound) {
@@ -111,6 +127,4 @@
 			throw new ResourceNotFoundException(String.format(ROLE_NOT_FOUND_MSG, roleIds));
 		}
 	}
-
-
 }
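
For clarity, the permission rule enforced by updateGroup(...) above reads as a single predicate: full admins may manage any group, while project admins may only manage groups attached to one of their projects. A hedged extraction of that rule, reusing only the UserRoles and ProjectService calls visible in this diff (the helper itself is illustrative, not part of the patch):

    private boolean canManageGroup(UserInfo user, String group) {
        if (UserRoles.isAdmin(user)) {
            return true; // full admin: any group
        }
        if (UserRoles.isProjectAdmin(user)) {
            // project admin: only groups referenced by one of the user's projects
            return projectService.getProjects(user).stream()
                    .map(ProjectDTO::getGroups)
                    .flatMap(Collection::stream)
                    .anyMatch(g -> g.equalsIgnoreCase(group));
        }
        return false;
    }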
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java
index 33b83fd..4995de4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/BillingUtils.java
@@ -20,10 +20,10 @@
 package com.epam.dlab.backendapi.util;
 
 import com.epam.dlab.backendapi.domain.BillingReportLine;
+import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.billing.BillingResourceType;
 import com.epam.dlab.dto.computational.UserComputationalResource;
 import jersey.repackaged.com.google.common.collect.Lists;
 import org.apache.commons.lang3.StringUtils;
@@ -37,11 +37,21 @@
 import java.util.Optional;
 import java.util.stream.Stream;
 
-public class BillingUtils {
+import static com.epam.dlab.dto.billing.BillingResourceType.BUCKET;
+import static com.epam.dlab.dto.billing.BillingResourceType.COMPUTATIONAL;
+import static com.epam.dlab.dto.billing.BillingResourceType.EDGE;
+import static com.epam.dlab.dto.billing.BillingResourceType.ENDPOINT;
+import static com.epam.dlab.dto.billing.BillingResourceType.EXPLORATORY;
+import static com.epam.dlab.dto.billing.BillingResourceType.IMAGE;
+import static com.epam.dlab.dto.billing.BillingResourceType.SSN;
+import static com.epam.dlab.dto.billing.BillingResourceType.VOLUME;
 
+public class BillingUtils {
     private static final String[] REPORT_HEADERS = {"DLab ID", "User", "Project", "DLab Resource Type", "Shape", "Product", "Cost"};
     private static final String REPORT_FIRST_LINE = "Service base name: %s. Available reporting period from: %s to: %s";
     private static final String TOTAL_LINE = "Total: %s %s";
+    private static final String SSN_FORMAT = "%s-ssn";
+    private static final String ENDPOINT_FORMAT = "%s-%s-endpoint";
     private static final String EDGE_FORMAT = "%s-%s-%s-edge";
     private static final String EDGE_VOLUME_FORMAT = "%s-%s-%s-edge-volume-primary";
     private static final String PROJECT_ENDPOINT_BUCKET_FORMAT = "%s-%s-%s-bucket";
@@ -50,9 +60,15 @@
     private static final String VOLUME_PRIMARY_COMPUTATIONAL_FORMAT = "%s-%s-volume-primary";
     private static final String VOLUME_SECONDARY_FORMAT = "%s-volume-secondary";
     private static final String VOLUME_SECONDARY_COMPUTATIONAL_FORMAT = "%s-%s-volume-secondary";
+    private static final String IMAGE_STANDARD_FORMAT1 = "%s-%s-%s-%s-notebook-image";
+    private static final String IMAGE_STANDARD_FORMAT2 = "%s-%s-%s-notebook-image";
+    private static final String IMAGE_CUSTOM_FORMAT = "%s-%s-%s-%s-%s";
+    private static final String IMAGE_NAME_PREFIX = "docker.dlab-";
+
     private static final String VOLUME_PRIMARY = "Volume primary";
     private static final String VOLUME_SECONDARY = "Volume secondary";
     private static final String SHARED_RESOURCE = "Shared resource";
+    private static final String IMAGE_NAME = "Image";
 
     private static final String DATAENGINE_NAME_FORMAT = "%d x %s";
     private static final String DATAENGINE_SERVICE_NAME_FORMAT = "Master: %s%sSlave:  %d x %s";
@@ -61,56 +77,73 @@
         final String userEdgeId = String.format(EDGE_FORMAT, sbn, project.toLowerCase(), endpoint);
         final String edgeVolumeId = String.format(EDGE_VOLUME_FORMAT, sbn, project.toLowerCase(), endpoint);
         final String endpointBucketId = String.format(PROJECT_ENDPOINT_BUCKET_FORMAT, sbn, project.toLowerCase(), endpoint);
-        final String projectEndpointBucketId = String.format(ENDPOINT_SHARED_BUCKET_FORMAT, sbn, endpoint);
 
         return Stream.of(
-                BillingReportLine.builder().resourceName("EDGE node").user(SHARED_RESOURCE).project(project).dlabId(userEdgeId).resourceType(BillingResourceType.EDGE).status(UserInstanceStatus.of(status)).build(),
-                BillingReportLine.builder().resourceName("EDGE volume").user(SHARED_RESOURCE).project(project).dlabId(edgeVolumeId).resourceType(BillingResourceType.VOLUME).build(),
-                BillingReportLine.builder().resourceName("Project endpoint shared bucket").user(SHARED_RESOURCE).project(project).dlabId(endpointBucketId).resourceType(BillingResourceType.BUCKET).build(),
-                BillingReportLine.builder().resourceName("Endpoint shared bucket").user(SHARED_RESOURCE).project(SHARED_RESOURCE).dlabId(projectEndpointBucketId).resourceType(BillingResourceType.BUCKET).build()
+                BillingReportLine.builder().resourceName("EDGE node").user(SHARED_RESOURCE).project(project).dlabId(userEdgeId).resourceType(EDGE).status(UserInstanceStatus.of(status)).build(),
+                BillingReportLine.builder().resourceName("EDGE volume").user(SHARED_RESOURCE).project(project).dlabId(edgeVolumeId).resourceType(VOLUME).build(),
+                BillingReportLine.builder().resourceName("Project endpoint shared bucket").user(SHARED_RESOURCE).project(project).dlabId(endpointBucketId).resourceType(BUCKET).build()
         );
     }
 
     public static Stream<BillingReportLine> ssnBillingDataStream(String sbn) {
-        final String ssnId = sbn + "-ssn";
+        final String ssnId = String.format(SSN_FORMAT, sbn);
         return Stream.of(
-                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN").dlabId(ssnId).resourceType(BillingResourceType.SSN).build(),
-                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN Volume").dlabId(String.format(VOLUME_PRIMARY_FORMAT, ssnId)).resourceType(BillingResourceType.VOLUME).build()
+                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN").dlabId(ssnId).resourceType(SSN).build(),
+                BillingReportLine.builder().user(SHARED_RESOURCE).project(SHARED_RESOURCE).resourceName("SSN Volume").dlabId(String.format(VOLUME_PRIMARY_FORMAT, ssnId)).resourceType(VOLUME).build()
         );
     }
 
-    public static Stream<BillingReportLine> exploratoryBillingDataStream(UserInstanceDTO userInstance, Integer maxSparkInstanceCount) {
+    public static Stream<BillingReportLine> sharedEndpointBillingDataStream(String endpoint, String sbn) {
+        final String projectEndpointBucketId = String.format(ENDPOINT_SHARED_BUCKET_FORMAT, sbn, endpoint.toLowerCase());
+        final String endpointId = String.format(ENDPOINT_FORMAT, sbn, endpoint.toLowerCase());
+        return Stream.of(
+                BillingReportLine.builder().resourceName("Endpoint shared bucket").user(SHARED_RESOURCE).project(SHARED_RESOURCE).dlabId(projectEndpointBucketId).resourceType(BUCKET).build(),
+                BillingReportLine.builder().resourceName("Endpoint").user(SHARED_RESOURCE).project(SHARED_RESOURCE).dlabId(endpointId).resourceType(ENDPOINT).build()
+        );
+    }
+
+    public static Stream<BillingReportLine> exploratoryBillingDataStream(UserInstanceDTO userInstance, Integer maxSparkInstanceCount, String sbn) {
         final Stream<BillingReportLine> computationalStream = userInstance.getResources()
                 .stream()
                 .filter(cr -> cr.getComputationalId() != null)
                 .flatMap(cr -> Stream.concat(Stream.of(
-                        withUserProject(userInstance).dlabId(cr.getComputationalId()).resourceName(cr.getComputationalName()).resourceType(BillingResourceType.COMPUTATIONAL)
+                        withUserProject(userInstance).dlabId(cr.getComputationalId()).resourceName(cr.getComputationalName()).resourceType(COMPUTATIONAL)
                                 .status(UserInstanceStatus.of(cr.getStatus())).shape(getComputationalShape(cr)).build(),
                         withUserProject(userInstance).resourceName(cr.getComputationalName() + ":" + VOLUME_PRIMARY).dlabId(String.format(VOLUME_PRIMARY_COMPUTATIONAL_FORMAT, cr.getComputationalId(), "m"))
-                                .resourceType(BillingResourceType.VOLUME).build(),
+                                .resourceType(VOLUME).build(),
                         withUserProject(userInstance).resourceName(cr.getComputationalName() + ":" + VOLUME_SECONDARY).dlabId(String.format(VOLUME_SECONDARY_COMPUTATIONAL_FORMAT, cr.getComputationalId(), "m"))
-                                .resourceType(BillingResourceType.VOLUME).build()
+                                .resourceType(VOLUME).build()
                         ),
                         getSlaveVolumes(userInstance, cr, maxSparkInstanceCount)
                 ));
         final String exploratoryId = userInstance.getExploratoryId();
+        final String imageId1 = String.format(IMAGE_STANDARD_FORMAT1, sbn, userInstance.getProject(), userInstance.getEndpoint(), userInstance.getImageName().replace(IMAGE_NAME_PREFIX, ""));
+        final String imageId2 = String.format(IMAGE_STANDARD_FORMAT2, sbn, userInstance.getEndpoint(), userInstance.getImageName().replace(IMAGE_NAME_PREFIX, ""));
         final String primaryVolumeId = String.format(VOLUME_PRIMARY_FORMAT, exploratoryId);
         final String secondaryVolumeId = String.format(VOLUME_SECONDARY_FORMAT, exploratoryId);
         final Stream<BillingReportLine> exploratoryStream = Stream.of(
-                withUserProject(userInstance).resourceName(userInstance.getExploratoryName()).dlabId(exploratoryId).resourceType(BillingResourceType.EXPLORATORY)
-                        .status(UserInstanceStatus.of(userInstance.getStatus())).shape(userInstance.getShape()).build(),
-                withUserProject(userInstance).resourceName(VOLUME_PRIMARY).dlabId(primaryVolumeId).resourceType(BillingResourceType.VOLUME).build(),
-                withUserProject(userInstance).resourceName(VOLUME_SECONDARY).dlabId(secondaryVolumeId).resourceType(BillingResourceType.VOLUME).build());
+                withUserProject(userInstance).resourceName(userInstance.getExploratoryName()).dlabId(exploratoryId).resourceType(EXPLORATORY).status(UserInstanceStatus.of(userInstance.getStatus())).shape(userInstance.getShape()).build(),
+                BillingReportLine.builder().resourceName(IMAGE_NAME).dlabId(imageId1).project(userInstance.getProject()).resourceType(IMAGE).build(),
+                BillingReportLine.builder().resourceName(IMAGE_NAME).dlabId(imageId2).project(userInstance.getProject()).resourceType(IMAGE).build(),
+                withUserProject(userInstance).resourceName(VOLUME_PRIMARY).dlabId(primaryVolumeId).resourceType(VOLUME).build(),
+                withUserProject(userInstance).resourceName(VOLUME_SECONDARY).dlabId(secondaryVolumeId).resourceType(VOLUME).build());
         return Stream.concat(computationalStream, exploratoryStream);
     }
 
+    public static Stream<BillingReportLine> customImageBillingDataStream(ImageInfoRecord image, String sbn) {
+        String imageId = String.format(IMAGE_CUSTOM_FORMAT, sbn, image.getProject(), image.getEndpoint(), image.getApplication(), image.getName());
+        return Stream.of(
+                BillingReportLine.builder().resourceName(IMAGE_NAME).project(image.getProject()).dlabId(imageId).resourceType(IMAGE).build()
+        );
+    }
+
     private static Stream<BillingReportLine> getSlaveVolumes(UserInstanceDTO userInstance, UserComputationalResource cr, Integer maxSparkInstanceCount) {
         List<BillingReportLine> list = new ArrayList<>();
         for (int i = 1; i <= maxSparkInstanceCount; i++) {
             list.add(withUserProject(userInstance).resourceName(cr.getComputationalName() + ":" + VOLUME_PRIMARY).dlabId(String.format(VOLUME_PRIMARY_COMPUTATIONAL_FORMAT, cr.getComputationalId(), "s" + i))
-                    .resourceType(BillingResourceType.VOLUME).build());
+                    .resourceType(VOLUME).build());
             list.add(withUserProject(userInstance).resourceName(cr.getComputationalName() + ":" + VOLUME_SECONDARY).dlabId(String.format(VOLUME_SECONDARY_COMPUTATIONAL_FORMAT, cr.getComputationalId(), "s" + i))
-                    .resourceType(BillingResourceType.VOLUME).build());
+                    .resourceType(VOLUME).build());
         }
         return list.stream();
     }
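
The helpers above each emit a Stream<BillingReportLine> for one slice of the infrastructure (SSN, shared endpoint, exploratory plus its clusters, custom images). A hedged sketch of how a full report could be assembled from them; only the BillingUtils signatures come from this diff, the variables and "endpoint1" are placeholders:

    // Illustrative composition only; sbn is the service base name, userInstance a
    // fetched UserInstanceDTO, and maxSparkInstanceCount a configuration value.
    Stream<BillingReportLine> report = Stream.of(
            BillingUtils.ssnBillingDataStream(sbn),
            BillingUtils.sharedEndpointBillingDataStream("endpoint1", sbn),
            BillingUtils.exploratoryBillingDataStream(userInstance, maxSparkInstanceCount, sbn))
            .flatMap(s -> s);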
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
index 0e29820..69aeb6c 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
@@ -50,8 +50,21 @@
 import com.epam.dlab.dto.base.CloudSettings;
 import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.computational.*;
-import com.epam.dlab.dto.exploratory.*;
+import com.epam.dlab.dto.computational.ComputationalCheckInactivityDTO;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStartDTO;
+import com.epam.dlab.dto.computational.ComputationalStopDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.UserComputationalResource;
+import com.epam.dlab.dto.exploratory.ExploratoryActionDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryCheckInactivityAction;
+import com.epam.dlab.dto.exploratory.ExploratoryCreateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryGitCredsUpdateDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryImageDTO;
+import com.epam.dlab.dto.exploratory.ExploratoryReconfigureSparkClusterActionDTO;
+import com.epam.dlab.dto.exploratory.LibInstallDTO;
+import com.epam.dlab.dto.exploratory.LibraryInstallDTO;
 import com.epam.dlab.dto.gcp.GcpCloudSettings;
 import com.epam.dlab.dto.gcp.computational.ComputationalCreateGcp;
 import com.epam.dlab.dto.gcp.computational.GcpComputationalTerminateDTO;
@@ -69,7 +82,9 @@
 import java.util.Map;
 import java.util.UUID;
 
-import static com.epam.dlab.cloud.CloudProvider.*;
+import static com.epam.dlab.cloud.CloudProvider.AWS;
+import static com.epam.dlab.cloud.CloudProvider.AZURE;
+import static com.epam.dlab.cloud.CloudProvider.GCP;
 
 @Singleton
 public class RequestBuilder {
@@ -284,6 +299,7 @@
 		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryActionDTO.class)
 				.withNotebookInstanceName(userInstance.getExploratoryId())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withNotebookImage(userInstance.getImageName())
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
 				.withExploratoryName(userInstance.getExploratoryName());
@@ -316,6 +332,7 @@
 		return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), LibListComputationalDTO.class)
 				.withComputationalId(computationalResource.getComputationalId())
 				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName())
 				.withComputationalImage(computationalResource.getImageName())
 				.withLibCacheKey(ExploratoryLibCache.libraryCacheKey(userInstance))
 				.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()));
@@ -474,7 +491,8 @@
 				.withComputationalName(computationalName)
 				.withNotebookInstanceName(exploratory.getExploratoryId())
 				.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
 
 	@SuppressWarnings("unchecked")
@@ -485,7 +503,8 @@
 				.withComputationalName(computationalName)
 				.withNotebookInstanceName(exploratory.getExploratoryId())
 				.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
 
 	@SuppressWarnings("unchecked")
@@ -515,7 +534,8 @@
 				.withNotebookImageName(exploratory.getImageName())
 				.withImage(cr.getImageName())
 				.withComputationalId(cr.getComputationalId())
-				.withProject(exploratory.getProject());
+				.withProject(exploratory.getProject())
+				.withEndpoint(endpointDTO.getName());
 	}
 
 
@@ -585,7 +605,8 @@
 				.withNotebookImage(userInstance.getImageName())
 				.withExploratoryName(userInstance.getExploratoryName())
 				.withReuploadKeyRequired(userInstance.isReuploadKeyRequired())
-				.withProject(userInstance.getProject());
+				.withProject(userInstance.getProject())
+				.withEndpoint(endpointDTO.getName());
 		return dto;
 	}
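
The recurring change in RequestBuilder above is a single pattern: every DTO sent to the provisioning service now carries the endpoint name alongside the project. A minimal illustrative fragment (the enclosing context and the dto variable are hypothetical; withProject/withEndpoint are the setters used throughout this diff):

    // dto stands for any of the provisioning DTOs built in this class.
    return dto
            .withProject(userInstance.getProject())
            .withEndpoint(endpointDTO.getName());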
 
diff --git a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
index 54d2cd6..6a8fd29 100644
--- a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
@@ -2,6 +2,8 @@
   {
     "_id": "nbShapes_p2.xlarge_fetching",
     "description": "Use p2.xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "p2.xlarge"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_t2.medium_fetching",
     "description": "Use t2.medium instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "t2.medium"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_r3.xlarge_fetching",
     "description": "Use r3.xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.xlarge"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_r4.2xlarge_fetching",
     "description": "Use r4.2xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r4.2xlarge"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_r3.4xlarge_fetching",
     "description": "Use r3.4xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.4xlarge"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_r3.8xlarge_fetching",
     "description": "Use r3.8xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "r3.8xlarge"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_c4.large_fetching",
     "description": "Use c4.large instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.large"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbShapes_c4.2xlarge_fetching",
     "description": "Use c4.2xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.2xlarge"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbShapes_c4.8xlarge_fetching",
     "description": "Use c4.8xlarge instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AWS",
     "exploratory_shapes": [
       "c4.8xlarge"
     ],
@@ -92,6 +110,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -102,6 +122,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -112,6 +134,8 @@
   {
     "_id": "nbCreateJupyterLab",
     "description": "Create Notebook JupyterLab",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-jupyterlab"
     ],
@@ -122,6 +146,8 @@
   {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -132,6 +158,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -142,6 +170,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -152,6 +182,8 @@
   {
     "_id": "nbCreateTensorRstudio",
     "description": "Create Notebook RStudio with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AWS",
     "exploratories": [
       "docker.dlab-tensor-rstudio"
     ],
@@ -162,6 +194,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "AWS",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -172,6 +206,8 @@
   {
     "_id": "nbCreateDataEngineService",
     "description": "Create Data Engine Service",
+    "type": "COMPUTATIONAL",
+    "cloud": "AWS",
     "computationals": [
       "docker.dlab-dataengine-service"
     ],
@@ -182,6 +218,8 @@
   {
     "_id": "compShapes_c4.xlarge_fetching",
     "description": "Use c4.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.xlarge"
     ],
@@ -192,6 +230,8 @@
   {
     "_id": "compShapes_r3.xlarge_fetching",
     "description": "Use r3.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.xlarge"
     ],
@@ -202,6 +242,8 @@
   {
     "_id": "compShapes_r4.2xlarge_fetching",
     "description": "Use r4.2xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r4.2xlarge"
     ],
@@ -212,6 +254,8 @@
   {
     "_id": "compShapes_r3.4xlarge_fetching",
     "description": "Use r3.4xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.4xlarge"
     ],
@@ -222,6 +266,8 @@
   {
     "_id": "compShapes_r3.8xlarge_fetching",
     "description": "Use r3.8xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "r3.8xlarge"
     ],
@@ -232,6 +278,8 @@
   {
     "_id": "compShapes_c4.2xlarge_fetching",
     "description": "Use c4.2xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.2xlarge"
     ],
@@ -242,6 +290,8 @@
   {
     "_id": "compShapes_c4.8xlarge_fetching",
     "description": "Use c4.8xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "c4.8xlarge"
     ],
@@ -252,6 +302,8 @@
   {
     "_id": "compShapes_p2.xlarge_fetching",
     "description": "Use p2.xlarge instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AWS",
     "computational_shapes": [
       "p2.xlarge"
     ],
@@ -262,6 +314,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "AWS",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -270,12 +324,33 @@
     ]
   },
   {
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "AWS",
+    "pages": [
+      "environment/*",
+      "/roleManagement",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "admin",
     "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "AWS",
     "pages": [
       "environment/*",
       "/api/infrastructure/backup",
       "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
       "/api/settings",
       "/user/settings",
       "/api/project",
diff --git a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
index 58cadb3..86eadff 100644
--- a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
@@ -1,7 +1,9 @@
 [
   {
     "_id": "nbShapes_Standard_NC6_fetching",
-    "description": "Allow to use Standard_NC6 instance shape for notebook",
+    "description": "Use Standard_NC6 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_NC6"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_Standard_E4s_v3_fetching",
     "description": "Use Standard_E4s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E4s_v3"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_Standard_E16s_v3_fetching",
     "description": "Use Standard_E16s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E16s_v3"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_Standard_E32s_v3_fetching",
     "description": "Use Standard_E32s_v3 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_E32s_v3"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_Standard_F2s_fetching",
     "description": "Use Standard_F2s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F2s"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_Standard_F4s_fetching",
     "description": "Use Standard_F4s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F4s"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_Standard_F8s_fetching",
     "description": "Use Standard_F8s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F8s"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbShapes_Standard_F16s_fetching",
     "description": "Use Standard_F16s instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "AZURE",
     "exploratory_shapes": [
       "Standard_F16s"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -92,6 +110,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -102,6 +122,8 @@
   {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -112,6 +134,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -122,6 +146,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "AZURE",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -132,6 +158,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "AZURE",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -142,6 +170,8 @@
   {
     "_id": "compShapes_Standard_F4s_fetching",
     "description": "Use Standard_F4s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F4s"
     ],
@@ -152,6 +182,8 @@
   {
     "_id": "compShapes_Standard_E4s_v3_fetching",
     "description": "Use Standard_E4s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E4s_v3"
     ],
@@ -162,6 +194,8 @@
   {
     "_id": "compShapes_Standard_E16s_v3_fetching",
     "description": "Use Standard_E16s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E16s_v3"
     ],
@@ -172,6 +206,8 @@
   {
     "_id": "compShapes_Standard_E32s_v3_fetching",
     "description": "Use Standard_E32s_v3 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_E32s_v3"
     ],
@@ -182,6 +218,8 @@
   {
     "_id": "compShapes_Standard_F8s_fetching",
     "description": "Use Standard_F8s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F8s"
     ],
@@ -192,6 +230,8 @@
   {
     "_id": "compShapes_Standard_F16s_fetching",
     "description": "Use Standard_F16s instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_F16s"
     ],
@@ -202,6 +242,8 @@
   {
     "_id": "compShapes_Standard_NC6_fetching",
     "description": "Use Standard_NC6 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "AZURE",
     "computational_shapes": [
       "Standard_NC6"
     ],
@@ -212,6 +254,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "AZURE",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -220,12 +264,33 @@
     ]
   },
   {
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "AZURE",
+    "pages": [
+      "environment/*",
+      "/roleManagement",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
+  },
+  {
     "_id": "admin",
     "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "AZURE",
     "pages": [
       "environment/*",
       "/api/infrastructure/backup",
       "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
       "/api/settings",
       "/user/settings",
       "/api/project",
diff --git a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
index 43d12e3..d2ef6dd 100644
--- a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
@@ -2,6 +2,8 @@
   {
     "_id": "nbShapes_n1-highcpu-2_fetching",
     "description": "Use n1-highcpu-2 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-2"
     ],
@@ -12,6 +14,8 @@
   {
     "_id": "nbShapes_n1-highcpu-8_fetching",
     "description": "Use n1-highcpu-8 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-8"
     ],
@@ -22,6 +26,8 @@
   {
     "_id": "nbShapes_n1-highcpu-32_fetching",
     "description": "Use n1-highcpu-32 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highcpu-32"
     ],
@@ -32,6 +38,8 @@
   {
     "_id": "nbShapes_n1-highmem-4_fetching",
     "description": "Use n1-highmem-4 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-4"
     ],
@@ -42,6 +50,8 @@
   {
     "_id": "nbShapes_n1-highmem-16_fetching",
     "description": "Use n1-highmem-16 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-16"
     ],
@@ -52,6 +62,8 @@
   {
     "_id": "nbShapes_n1-highmem-32_fetching",
     "description": "Use n1-highmem-32 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-highmem-32"
     ],
@@ -62,6 +74,8 @@
   {
     "_id": "nbShapes_n1-standard-2_fetching",
     "description": "Use n1-standard-2 instance shape for notebook",
+    "type": "NOTEBOOK_SHAPE",
+    "cloud": "GCP",
     "exploratory_shapes": [
       "n1-standard-2"
     ],
@@ -72,6 +86,8 @@
   {
     "_id": "nbCreateDeeplearning",
     "description": "Create Notebook Deep Learning",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-deeplearning"
     ],
@@ -82,6 +98,8 @@
   {
     "_id": "nbCreateJupyter",
     "description": "Create Notebook Jupyter",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-jupyter"
     ],
@@ -92,6 +110,8 @@
   {
     "_id": "nbCreateJupyterLab",
     "description": "Create Notebook JupyterLab",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-jupyterlab"
     ],
@@ -102,6 +122,8 @@
   {
     "_id": "nbCreateSuperset",
     "description": "Create Notebook Superset",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-superset"
     ],
@@ -112,6 +134,8 @@
   {
     "_id": "nbCreateRstudio",
     "description": "Create Notebook RStudio",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-rstudio"
     ],
@@ -122,6 +146,8 @@
   {
     "_id": "nbCreateTensor",
     "description": "Create Notebook Jupyter with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-tensor"
     ],
@@ -132,6 +158,8 @@
   {
     "_id": "nbCreateTensorRstudio",
     "description": "Create Notebook RStudio with TensorFlow",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-tensor-rstudio"
     ],
@@ -142,6 +170,8 @@
   {
     "_id": "nbCreateZeppelin",
     "description": "Create Notebook Apache Zeppelin",
+    "type": "NOTEBOOK",
+    "cloud": "GCP",
     "exploratories": [
       "docker.dlab-zeppelin"
     ],
@@ -152,6 +182,8 @@
   {
     "_id": "nbCreateDataEngine",
     "description": "Create Data Engine",
+    "type": "COMPUTATIONAL",
+    "cloud": "GCP",
     "computationals": [
       "docker.dlab-dataengine"
     ],
@@ -162,6 +194,8 @@
   {
     "_id": "nbCreateDataEngineService",
     "description": "Create Data Engine Service",
+    "type": "COMPUTATIONAL",
+    "cloud": "GCP",
     "computationals": [
       "docker.dlab-dataengine-service"
     ],
@@ -172,6 +206,8 @@
   {
     "_id": "compShapes_n1-standard-2_fetching",
     "description": "Use n1-standard-2 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-standard-2"
     ],
@@ -182,6 +218,8 @@
   {
     "_id": "compShapes_n1-highmem-4_fetching",
     "description": "Use n1-highmem-4 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-4"
     ],
@@ -192,6 +230,8 @@
   {
     "_id": "compShapes_n1-highmem-16_fetching",
     "description": "Use n1-highmem-16 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-16"
     ],
@@ -202,6 +242,8 @@
   {
     "_id": "compShapes_n1-highmem-32_fetching",
     "description": "Use n1-highmem-32 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highmem-32"
     ],
@@ -212,6 +254,8 @@
   {
     "_id": "compShapes_n1-highcpu-8_fetching",
     "description": "Use n1-highcpu-8 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-8"
     ],
@@ -222,6 +266,8 @@
   {
     "_id": "compShapes_n1-highcpu-2_fetching",
     "description": "Use n1-highcpu-2 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-2"
     ],
@@ -232,6 +278,8 @@
   {
     "_id": "compShapes_n1-highcpu-32_fetching",
     "description": "Use n1-highcpu-32 instance shape for cluster",
+    "type": "COMPUTATIONAL_SHAPE",
+    "cloud": "GCP",
     "computational_shapes": [
       "n1-highcpu-32"
     ],
@@ -242,6 +290,8 @@
   {
     "_id": "nbBillingReportFull",
     "description": "View full billing report for all users",
+    "type": "BILLING",
+    "cloud": "GCP",
     "pages": [
       "/api/infrastructure_provision/billing"
     ],
@@ -250,11 +300,12 @@
     ]
   },
   {
-    "_id": "admin",
-    "description": "Allow to execute administration operation",
+    "_id": "projectAdmin",
+    "description": "Allow to execute administration operation per project",
+    "type": "ADMINISTRATION",
+    "cloud": "GCP",
     "pages": [
       "environment/*",
-      "/api/infrastructure/backup",
       "/roleManagement",
       "/api/settings",
       "/user/settings",
@@ -264,5 +315,26 @@
     "groups": [
       "$anyuser"
     ]
+  },
+  {
+    "_id": "admin",
+    "description": "Allow to execute administration operation",
+    "type": "ADMINISTRATION",
+    "cloud": "GCP",
+    "pages": [
+      "environment/*",
+      "/api/infrastructure/backup",
+      "/roleManagement",
+      "/roleManagement/create",
+      "/roleManagement/delete",
+      "/api/settings",
+      "/user/settings",
+      "/api/project",
+      "/api/project/create",
+      "/api/endpoint"
+    ],
+    "groups": [
+      "$anyuser"
+    ]
   }
 ]
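
Across all three mongo_roles.json files the shape of a role document is now uniform. A hypothetical Java-side view of that schema, derived only from the fields visible above (the class is illustrative and not part of the patch):

    // Field names mirror the JSON documents in this diff.
    public class UserRoleDocument {
        String _id;
        String description;
        String type;                       // NOTEBOOK, NOTEBOOK_SHAPE, COMPUTATIONAL, COMPUTATIONAL_SHAPE, BILLING or ADMINISTRATION
        String cloud;                      // AWS, AZURE or GCP
        List<String> exploratories;        // NOTEBOOK roles
        List<String> computationals;       // COMPUTATIONAL roles
        List<String> exploratory_shapes;   // NOTEBOOK_SHAPE roles
        List<String> computational_shapes; // COMPUTATIONAL_SHAPE roles
        List<String> pages;                // BILLING and ADMINISTRATION roles
        List<String> groups;               // e.g. "$anyuser"
    }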
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
index ba8e6f7..74ff5af 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
@@ -19,7 +19,7 @@
 
 <div id="dialog-box" class="manage-env-dialog">
   <header class="dialog-header">
-    <h4 class="modal-title">Manage environment</h4>
+    <h4 class="modal-title">Manage DLab quotas</h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
index ced278d..631e7ae 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
@@ -77,7 +77,8 @@
             <span [hidden]="filtering && filterForm.statuses.length > 0 && !collapsedFilterRow">more_vert</span>
           </i>
         </button> </th>
-      <td mat-cell *matCellDef="let element" class="ani status" ngClass="{{element.status || ''}}">{{ element.status }}
+      <td mat-cell *matCellDef="let element" class="ani status">
+        <span ngClass="{{element.status || ''}}">{{ element.status }}</span>
       </td>
     </ng-container>
 
@@ -121,7 +122,9 @@
     </ng-container>
 
     <ng-container matColumnDef="actions">
-      <th mat-header-cell *matHeaderCellDef class="actions"></th>
+      <th mat-header-cell *matHeaderCellDef class="actions">
+        <span class="label"> Actions </span>
+      </th>
       <td mat-cell *matCellDef="let element" class="settings actions-col">
         <span #settings class="actions" (click)="actions.toggle($event, settings)" *ngIf="element.type !== 'edge node'"
           [ngClass]="{
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
index 87bef73..6c52559 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
@@ -50,13 +50,17 @@
     .settings {
       padding-right: 14px;
 
-      .actions {
-        margin-top: 2px;
+    }
+    .actions {
+      margin-top: 0;
+      .label {
+        padding-right: 5px;
       }
     }
-
     .actions-col {
       width: 6%;
     }
 
     .dashboard_table_body {
@@ -86,19 +90,21 @@
     height: auto;
     .label {
       display: inline-block;
-      padding-top: 10px;
+      padding-top: 14px;
       vertical-align: super !important;
       padding-left: 5px;
-      font-size: 11px;
+      font-size: 12px;
+    }
+    .actions {
+      text-align: right;
+      .label {
+        display: inline-block;
+        padding-top: 11px;
+      }
     }
   }
 
   .filter-row {
     background: inherit;
   }
-
-  .actions {
-    text-align: right;
-  }
-
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
index 7f2d728..4c4bdae 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
@@ -27,7 +27,7 @@
         <i class="material-icons"></i>SSN Monitor
       </button> -->
       <button mat-raised-button class="butt env" (click)="openManageEnvironmentDialog()">
-        <i class="material-icons"></i>Manage environment
+        <i class="material-icons"></i>Manage DLab quotas
       </button>
       <!-- <button mat-raised-button class="butt" (click)="showBackupDialog()" [disabled]="creatingBackup">
         <i class="material-icons">backup</i>Backup
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
index da2122b..87e554d 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
@@ -48,7 +48,7 @@
 export class ManagementComponent implements OnInit {
   public user: string = '';
   public healthStatus: GeneralEnvironmentStatus;
-  public anyEnvInProgress: boolean = false;
+  // public anyEnvInProgress: boolean = false;
   public dialogRef: any;
 
   constructor(
@@ -78,6 +78,7 @@
       .environmentManagement(
         $event.environment.user,
         $event.action,
+        $event.environment.project,
         $event.environment.type === 'edge node' ? 'edge' : $event.environment.name,
         $event.resource ? $event.resource.computational_name : null
       ).subscribe(
@@ -85,9 +86,9 @@
         error => this.toastr.error('Environment management failed!', 'Oops!'));
   }
 
-  showBackupDialog() {
-    this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
-  }
+  // showBackupDialog() {
+  //   this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
+  // }
 
   showEndpointsDialog() {
     this.dialog.open(EndpointsComponent, { panelClass: 'modal-xl-s' })
@@ -95,25 +96,24 @@
   }
 
   openManageEnvironmentDialog() {
-    this.projectService.getProjectsManagingList().subscribe(projectsList => {
+    this.projectService.getProjectsList().subscribe(projectsList => {
       this.getTotalBudgetData().subscribe(total => {
         this.dialogRef = this.dialog.open(ManageEnvironmentComponent, { data: { projectsList, total }, panelClass: 'modal-sm' });
-        // this.dialogRef.componentInstance.manageEnv.subscribe((data) => this.manageEnvironment(data));
         this.dialogRef.afterClosed().subscribe(result => result && this.setBudgetLimits(result));
       }, () => this.toastr.error('Failed users list loading!', 'Oops!'));
     });
   }
 
-  openSsnMonitorDialog() {
-    this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
-  }
-
-  isEnvironmentsInProgress(exploratory): boolean {
-    return exploratory.some(item => {
-      return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
-        el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
-    });
-  }
+  // openSsnMonitorDialog() {
+  //   this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
+  // }
+  //
+  // isEnvironmentsInProgress(exploratory): boolean {
+  //   return exploratory.some(item => {
+  //     return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
+  //       el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
+  //   });
+  // }
 
   setBudgetLimits($event) {
     this.projectService.updateProjectsBudget($event.projects).subscribe((result: any) => {
@@ -136,37 +136,37 @@
   //       .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
   // }
 
-  handleSuccessAction(action) {
-    this.toastr.success(`Action ${action} is processing!`, 'Processing!');
-    this.projectService.getProjectsManagingList().subscribe(data => {
-      this.dialogRef.componentInstance.data.projectsList = data;
-      this.dialogRef.componentInstance.setProjectsControl();
-    });
-    this.buildGrid();
-  }
+  // handleSuccessAction(action) {
+  //   this.toastr.success(`Action ${action} is processing!`, 'Processing!');
+  //   this.projectService.getProjectsManagingList().subscribe(data => {
+  //     this.dialogRef.componentInstance.data.projectsList = data;
+  //     this.dialogRef.componentInstance.setProjectsControl();
+  //   });
+  //   this.buildGrid();
+  // }
+  //
+  // get creatingBackup(): boolean {
+  //   return this.backupService.inProgress;
+  // }
 
-  get creatingBackup(): boolean {
-    return this.backupService.inProgress;
-  }
-
-  private getExploratoryList() {
-    this.userResourceService.getUserProvisionedResources()
-      .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
-        ExploratoryModel.loadEnvironments(result)));
-  }
+  // private getExploratoryList() {
+  //   this.userResourceService.getUserProvisionedResources()
+  //     .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
+  //       ExploratoryModel.loadEnvironments(result)));
+  // }
 
   private getEnvironmentHealthStatus() {
     this.healthStatusService
       .getEnvironmentStatuses()
       .subscribe((status: GeneralEnvironmentStatus) => {
         this.healthStatus = status;
-        this.getExploratoryList();
+        // this.getExploratoryList();
       });
   }
 
-  private getActiveUsersList() {
-    return this.healthStatusService.getActiveUsers();
-  }
+  // private getActiveUsersList() {
+  //   return this.healthStatusService.getActiveUsers();
+  // }
 
   private getTotalBudgetData() {
     return this.healthStatusService.getTotalBudgetData();
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
index 4e7a663..b4f0701 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.model.ts
@@ -69,6 +69,7 @@
 
 export interface GeneralEnvironmentStatus {
   admin: boolean;
+  projectAdmin: boolean;
   billingEnabled: boolean;
   billingQuoteUsed: number;
   list_resources: any;
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
index e21990d..014c89b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
@@ -26,9 +26,12 @@
 
 @Injectable()
 export class ProjectDataService {
-  _projects = new BehaviorSubject<any>(null);
-  endpointsList: any;
-  constructor(private projectService: ProjectService, private endpointService: EndpointService) {
+  public _projects = new BehaviorSubject<any>(null);
+  private endpointsList: any = [];
+  constructor(
+    private projectService: ProjectService,
+    private endpointService: EndpointService
+  ) {
     this.getProjectsList();
   }
 
@@ -41,7 +44,7 @@
     this.projectService.getProjectsList()
       .pipe(
         mergeMap ((response: Project[]) => {
-            if (response) {
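+            // Decorate projects with endpoint statuses only once the endpoints list has loaded.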
+            if (response && this.endpointsList.length) {
               response.forEach(project => project.endpoints.forEach(endpoint => {
                 const filtredEndpoints =  this.endpointsList.filter(v => v.name === endpoint.name);
                 if (filtredEndpoints.length) {
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
index 3983ab1..d8f697f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
@@ -63,7 +63,9 @@
   </ng-container>
 
   <ng-container matColumnDef="actions">
-    <th mat-header-cell *matHeaderCellDef class="project-actions"></th>
+    <th mat-header-cell *matHeaderCellDef class="project-actions">
+      <span class="label"> Actions </span>
+    </th>
     <td mat-cell *matCellDef="let element" class="settings">
       <span #settings (click)="actions.toggle($event, settings)" class="actions"></span>
       <bubble-up #actions class="list-menu" position="bottom-left" alternative="top-left">
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
index 73b725d..efe9ba3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
@@ -79,7 +79,13 @@
     vertical-align: top;
     padding: 10px 24px;
 
-    span {
+    &.mat-header-cell {
+      padding-top: 19px;
+      padding-right: 13px;
+      color: rgba(0, 0, 0, .54);
+    }
+
+    &:not(.mat-header-cell) span {
       transition: all .5s ease-in-out;
       cursor: pointer;
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
index b4ba7df..3be7a10 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
@@ -20,8 +20,12 @@
 
 <div *ngIf="projectList" class="base-retreat">
   <div class="sub-nav">
-    <div>
-      <button mat-raised-button class="butt butt-create" (click)="createProject()" [disabled]="!projectList.length">
+    <div
+      matTooltip="Only admin can create new project."
+      matTooltipPosition="above"
+      [matTooltipDisabled]="healthStatus?.admin"
+    >
+      <button mat-raised-button class="butt butt-create" (click)="createProject()" [disabled]="!projectList.length || !healthStatus?.admin && healthStatus?.projectAdmin">
         <i class="material-icons">add</i>Create new
       </button>
     </div>
@@ -47,8 +51,7 @@
   </mat-card>
 
   <div [hidden]="!projectList.length">
-    <project-list (editItem)="editProject($event)" (deleteItem)="deleteProject($event)"
-      (toggleStatus)="toggleStatus($event)">
+    <project-list (editItem)="editProject($event)" (toggleStatus)="toggleStatus($event)">
     </project-list>
   </div>
 </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
index ba3d45a..9833a40 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
@@ -23,9 +23,10 @@
 import { ToastrService } from 'ngx-toastr';
 
 import { ProjectDataService } from './project-data.service';
-import { HealthStatusService, ProjectService } from '../../core/services';
+import { HealthStatusService, ProjectService, UserResourceService } from '../../core/services';
 import { NotificationDialogComponent } from '../../shared/modal-dialog/notification-dialog';
 import { ProjectListComponent } from './project-list/project-list.component';
+import { ExploratoryModel } from '../../resources/resources-grid/resources-grid.model';
 
 export interface Endpoint {
   name: string;
@@ -50,6 +51,7 @@
   projectList: Project[] = [];
   healthStatus: any;
   activeFiltering: boolean = false;
+  resources: any = [];
 
   private subscriptions: Subscription = new Subscription();
 
@@ -60,7 +62,8 @@
     public toastr: ToastrService,
     private projectService: ProjectService,
     private projectDataService: ProjectDataService,
-    private healthStatusService: HealthStatusService
+    private healthStatusService: HealthStatusService,
+    private userResourceService: UserResourceService
   ) { }
 
   ngOnInit() {
@@ -70,12 +73,20 @@
         if (value) this.projectList = value;
       }));
     this.refreshGrid();
+    this.getResources();
   }
 
   ngOnDestroy() {
     this.subscriptions.unsubscribe();
   }
 
+  private getResources() {
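+    // Snapshot of the user's provisioned resources; used below to list what a
+    // project termination would affect.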
+    this.userResourceService.getUserProvisionedResources()
+      .subscribe((result: any) => {
+        this.resources = ExploratoryModel.loadEnvironments(result);
+      });
+  }
+
   refreshGrid() {
     this.projectDataService.updateProjects();
     this.activeFiltering = false;
@@ -110,6 +121,28 @@
   }
 
   private toggleStatusRequest(data, action) {
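+    // Termination cascades to the project's resources: collect the still-active ones
+    // on the selected endpoints and ask for confirmation before proceeding.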
+    if (action === 'terminate') {
+      const projectsResources = this.resources
+        .filter(resource => resource.project === data.project_name)[0].exploratory
+        .filter(expl => expl.status !== 'terminated' && expl.status !== 'terminating');
+
+      let termResources = [];
+      data.endpoint.forEach(v => {
+        termResources = [...termResources, ...projectsResources.filter(resource => resource.endpoint === v)];
+      });
+
+      this.dialog.open(NotificationDialogComponent, {
+        data: { type: 'terminateNode', item: { action: data, resources: termResources.map(resource => resource.name) } },
+        panelClass: 'modal-sm'
+      }).afterClosed().subscribe(result => {
+        result && this.edgeNodeAction(data, action);
+      });
+    } else {
+      this.edgeNodeAction(data, action);
+    }
+  }
+
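+  // Issues the actual edge-node request once any required confirmation has passed.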
+  private edgeNodeAction(data, action) {
     this.projectService.toggleProjectStatus(data, action).subscribe(() => {
       this.refreshGrid();
       this.toastr.success(`Edge node ${this.toEndpointAction(action)} is in progress!`, 'Processing!');
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
index af111c2..87e27a2 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
@@ -19,8 +19,12 @@
 
 <div class="manage-roles base-retreat">
   <div class="sub-nav">
-    <div>
-      <button mat-raised-button class="butt add-group" (click)="stepperView = !stepperView">
+    <div matTooltip="Only admin can add group."
+         matTooltipPosition="above"
+         [matTooltipDisabled]="healthStatus?.admin"
+    >
+      <button mat-raised-button class="butt add-group" (click)="stepperView = !stepperView" [disabled]="!healthStatus?.admin"
+              >
         <i class="material-icons">people_outline</i>Add group
       </button>
     </div>
@@ -45,6 +49,7 @@
               class="material-icons">keyboard_arrow_right</i></button>
         </div>
       </mat-step>
+
       <mat-step [completed]='false'>
         <ng-template matStepLabel>Users</ng-template>
         <div class="inner-step mat-reset">
@@ -58,35 +63,17 @@
               class="material-icons">keyboard_arrow_right</i></button>
         </div>
       </mat-step>
+
       <mat-step [completed]='false'>
         <ng-template matStepLabel>Roles</ng-template>
         <div class="inner-step mat-reset roles">
           <div class="selector-wrapper">
-            <mat-form-field>
-              <mat-select
-                multiple [compareWith]="compareObjects"
-                name="roles"
-                [(value)]="setupRoles"
-                disableOptionCentering
-                placeholder="Select roles"
-                panelClass="select-role"
-              >
-                <mat-option class="multiple-select" disabled>
-                  <a class="select ani" (click)="selectAllOptions(setupRoles, rolesList)">
-                    <i class="material-icons">playlist_add_check</i>&nbsp;All
-                  </a>
-                  <a class="deselect ani" (click)="selectAllOptions(setupRoles)">
-                    <i class="material-icons">clear</i>&nbsp;None
-                  </a>
-                </mat-option>
-                <mat-option *ngFor="let role of rolesList" [value]="role">
-                  {{ role }}
-                </mat-option>
-              </mat-select>
-              <button class="caret">
-                <i class="material-icons">keyboard_arrow_down</i>
-              </button>
-            </mat-form-field>
+            <multi-level-select-dropdown
+              (selectionChange)="onUpdate($event)"
+              name="roles"
+              [items]="rolesList"
+              [model]="setupRoles">
+            </multi-level-select-dropdown>
           </div>
         </div>
         <div class="text-center m-bott-10">
@@ -94,9 +81,10 @@
               class="material-icons">keyboard_arrow_left</i>Back</button>
           <button mat-raised-button (click)="resetDialog()" class="butt">Cancel</button>
           <button mat-raised-button (click)="manageAction('create', 'group')" class="butt butt-success"
-            [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length > 0">Create</button>
+            [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length">Create</button>
         </div>
       </mat-step>
+
     </mat-horizontal-stepper>
   </mat-card>
   <mat-divider></mat-divider>
@@ -112,27 +100,13 @@
         <th mat-header-cell *matHeaderCellDef class="roles"> Roles </th>
         <td mat-cell *matCellDef="let element" class="roles">
           <div class="inner-step mat-reset">
-            <div class="selector-wrapper-edit">
-              <mat-form-field class="select">
-                <mat-select multiple [compareWith]="compareObjects" name="selected_roles" disableOptionCentering
-                  [(value)]="element.selected_roles" placeholder="Select roles" class="roles-select" panelClass="select-role">
-                  <mat-option class="multiple-select" disabled>
-                    <a class="select ani" (click)="selectAllOptions(element, rolesList, 'selected_roles')">
-                      <i class="material-icons">playlist_add_check</i>&nbsp;All
-                    </a>
-                    <a class="deselect ani" (click)="selectAllOptions(element, null, 'selected_roles')">
-                      <i class="material-icons">clear</i>&nbsp;None
-                    </a>
-                  </mat-option>
-                  <mat-option *ngFor="let role of rolesList" [value]="role">
-                    {{ role }}
-                  </mat-option>
-                </mat-select>
-                <button class="caret">
-                  <i class="material-icons">keyboard_arrow_down</i>
-                </button>
-              </mat-form-field>
-            </div>
+              <multi-level-select-dropdown
+                (selectionChange)="onUpdate($event)"
+                [type]="element.group"
+                [items]="rolesList"
+                [model]="element.selected_roles">
+              </multi-level-select-dropdown>
           </div>
         </td>
       </ng-container>
@@ -161,15 +135,29 @@
       <ng-container matColumnDef="actions">
         <th mat-header-cell *matHeaderCellDef class="actions"></th>
         <td mat-cell *matCellDef="let element" class="actions">
-          <span (click)="manageAction('delete', 'group', element)" class="reset ani">
-            <mat-icon>delete_forever</mat-icon>
-          </span>
-          <span class="apply ani" matTooltip="Group cannot be updated without any selected role"
-            matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
-            [ngClass]="{ 'not-allowed' : !element.selected_roles.length }"
-            (click)="manageAction('update', 'group', element)">
-            <mat-icon>done</mat-icon>
-          </span>
+          <div class="actions-wrapper">
+            <span class="action-disabled"
+              matTooltip="Only admin can delete group."
+              matTooltipPosition="above"
+              [matTooltipDisabled]="healthStatus?.admin"
+            >
+              <span
+                (click)="manageAction('delete', 'group', element)"
+                class="reset ani"
+                [ngClass]="{ 'not-allowed' : !healthStatus?.admin}"
+
+              >
+              <mat-icon >delete_forever</mat-icon>
+            </span>
+            </span>
+
+            <span class="apply ani big-icon" matTooltip="Group cannot be updated without any selected role"
+              matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
+              [ngClass]="{ 'not-allowed' : !element.selected_roles.length || isGroupChanded(element)}"
+              (click)="manageAction('update', 'group', element)">
+              <mat-icon [ngClass]="{'big-icon': !isGroupChanded(element) && element.selected_roles.length}">done</mat-icon>
+            </span>
+          </div>
         </td>
       </ng-container>
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
index dd14655..1167084 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
@@ -88,17 +88,23 @@
   }
 }
 
+.mat-horizontal-content-container {
+  overflow: visible !important;
+}
+
 .selector-wrapper {
   display: flex;
   align-self: center;
   width: 490px;
   height: 36px;
-  padding-left: 10px;
+  padding-left: 0;
   font-family: 'Open Sans', sans-serif;
   font-size: 15px;
   font-weight: 300;
   box-shadow: 0 3px 1px -2px rgba(0, 0, 0, 0.2), 0 2px 2px 0 rgba(0, 0, 0, 0.14), 0 1px 5px 0 rgba(0, 0, 0, 0.12);
-
+  multi-level-select-dropdown {
+    width: 100%;
+  }
   mat-form-field {
     width: 100%;
 
@@ -137,7 +143,6 @@
 }
 
 .roles {
-  // width: 30%;
 
   .selector-wrapper-edit {
     position: relative;
@@ -198,6 +203,7 @@
   }
 }
 
 .expanded-panel {
   display: flex;
   align-items: flex-end;
@@ -343,11 +349,11 @@
   }
 
   .roles {
-    width: 30%;
+    width: 35%;
   }
 
   .users {
-    width: 40%;
+    width: 35%;
   }
 
   .actions {
@@ -355,13 +361,29 @@
     width: 10%;
     text-align: center;
 
+    .actions-wrapper {
+      height: 41px;
+      display: flex;
+      align-items: center;
+      justify-content: flex-end;
+    }
+
     span {
-      transition: all .5s ease-in-out;
+      transition: all .35s ease-in-out;
       cursor: pointer;
 
+      &.action-disabled {
+        cursor: not-allowed;
+      }
+
       .mat-icon {
         font-size: 18px;
         padding-top: 12px;
+        &.big-icon {
+          font-size: 25px;
+          padding-top: 10px;
+          transition: .25s;
+        }
       }
 
       &:hover {
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
index 8afec35..bf57438 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
@@ -21,8 +21,7 @@
 import { ValidatorFn, FormControl } from '@angular/forms';
 import { MatDialog, MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog';
 import { ToastrService } from 'ngx-toastr';
-
-import { RolesGroupsService, HealthStatusService } from '../../core/services';
+import { RolesGroupsService, HealthStatusService, ApplicationSecurityService, AppRoutingService } from '../../core/services';
 import { CheckUtils } from '../../core/util';
 import { DICTIONARY } from '../../../dictionary/global.dictionary';
 import {ProgressBarService} from '../../core/services/progress-bar.service';
@@ -38,11 +37,11 @@
 
   public groupsData: Array<any> = [];
   public roles: Array<any> = [];
-  public rolesList: Array<string> = [];
+  public rolesList: Array<any> = [];
   public setupGroup: string = '';
   public setupUser: string = '';
   public manageUser: string = '';
-  public setupRoles: Array<string> = [];
+  public setupRoles: Array<any> = [];
   public updatedRoles: Array<string> = [];
   public healthStatus: any;
   public delimitersRegex = /[-_]?/g;
@@ -51,7 +50,7 @@
   stepperView: boolean = false;
   displayedColumns: string[] = ['name', 'roles', 'users', 'actions'];
   @Output() manageRolesGroupAction: EventEmitter<{}> = new EventEmitter();
-  private startedGroups;
+  private startedGroups: Array<any>;
 
   constructor(
     public toastr: ToastrService,
@@ -59,10 +58,11 @@
     private rolesService: RolesGroupsService,
     private healthStatusService: HealthStatusService,
     private progressBarService: ProgressBarService,
+    private applicationSecurityService: ApplicationSecurityService,
+    private appRoutingService: AppRoutingService,
   ) { }
 
   ngOnInit() {
-    this.openManageRolesDialog();
     this.getEnvironmentHealthStatus();
   }
 
@@ -72,9 +72,12 @@
       this.rolesService.getRolesData().subscribe(
         (roles: any) => {
           this.roles = roles;
-          this.rolesList = roles.map(role => role.description);
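+          // Keep type and cloud so the grouped dropdown can render sections; the
+          // stable sorts below order the list by type, then by cloud within a type.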
+          this.rolesList = roles.map(role => ({role: role.description, type: role.type, cloud: role.cloud}));
+          this.rolesList = this.rolesList.sort((a, b) => (a.cloud > b.cloud) ? 1 : ((b.cloud > a.cloud) ? -1 : 0));
+          this.rolesList = this.rolesList.sort((a, b) => (a.type > b.type) ? 1 : ((b.type > a.type) ? -1 : 0));
           this.updateGroupData(groups);
-          this.getGroupsListCopy();
           this.stepperView = false;
         },
         error => this.toastr.error(error.message, 'Oops!'));
@@ -103,7 +106,7 @@
           action, type, value: {
             name: this.setupGroup,
             users: this.setupUser ? this.setupUser.split(',').map(elem => elem.trim()) : [],
-            roleIds: this.extractIds(this.roles, this.setupRoles)
+            roleIds: this.extractIds(this.roles, this.setupRoles.map(v => v.role))
           }
         });
       this.stepperView = false;
@@ -123,64 +126,69 @@
         }
       });
     } else if (action === 'update') {
-      this.manageRolesGroups({
-        action, type, value: {
-          name: item.group,
-          roleIds: this.extractIds(this.roles, item.selected_roles),
-          users: item.users || []
+      const currGroupSource = this.startedGroups.filter(cur => cur.group === item.group)[0];
+      const deletedUsers = currGroupSource.users.filter(user => !item.users.includes(user));
+      this.dialog.open(ConfirmationDialogComponent, { data:
+          { notebook: deletedUsers, type: ConfirmationDialogType.deleteUser }, panelClass: 'modal-sm' })
+        .afterClosed().subscribe((res) => {
+        if (!res) {
+          item.users = [...currGroupSource.users];
+          item.selected_roles = [...currGroupSource.selected_roles];
+          item.roles = [...currGroupSource.roles];
+        } else {
+          this.manageRolesGroups({
+            action, type, value: {
+              name: item.group,
+              roleIds: this.extractIds(this.roles, item.selected_roles.map(v => v.role)),
+              users: item.users || []
+            }
+          });
         }
       });
     }
-    this.getEnvironmentHealthStatus();
     this.resetDialog();
   }
 
   public manageRolesGroups($event) {
     switch ($event.action) {
       case 'create':
-        this.rolesService.setupNewGroup($event.value).subscribe(res => {
+        this.rolesService.setupNewGroup($event.value).subscribe(() => {
           this.toastr.success('Group creation success!', 'Created!');
           this.getGroupsData();
         }, () => this.toastr.error('Group creation failed!', 'Oops!'));
         break;
+
       case 'update':
-        const currGroup =  this.startedGroups.filter(group => group.group === $event.value.name);
-        const deletedUsers = currGroup[0].users.filter(user => {
-          if ($event.value.users.includes(user)) {
-            return false;
+        this.rolesService.updateGroup($event.value).subscribe(() => {
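+          // If the updated group no longer grants admin or projectAdmin, the current
+          // user's own access may have changed, so re-validate the session first.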
+          this.toastr.success('Group data was updated successfully!', 'Success!');
+          if (!$event.value.roleIds.includes('admin') && !$event.value.roleIds.includes('projectAdmin')) {
+            this.applicationSecurityService.isLoggedIn().subscribe(() => {
+              this.getEnvironmentHealthStatus();
+            });
           } else {
-            return true;
+            this.openManageRolesDialog();
           }
-        });
-        if (deletedUsers.length) {
-          this.dialog.open(ConfirmationDialogComponent, { data:
-              { notebook: deletedUsers, type: ConfirmationDialogType.deleteUser }, panelClass: 'modal-sm' })
-            .afterClosed().subscribe((res) => {
-            if (!res) {
-              $event.value.users = $event.value.users.concat(deletedUsers);
-              this.updateGroup($event.value, ', but users did\'t delete from group');
-            } else {
-              this.updateGroup($event.value, '');
-            }
-          });
-        } else {
-          this.updateGroup($event.value, '');
-        }
+        }, () => this.toastr.error('Failed to update group data!', 'Oops!'));
 
         break;
+
       case 'delete':
         if ($event.type === 'users') {
-          this.rolesService.removeUsersForGroup($event.value).subscribe(res => {
+          this.rolesService.removeUsersForGroup($event.value).subscribe(() => {
             this.toastr.success('Users was successfully deleted!', 'Success!');
             this.getGroupsData();
           }, () => this.toastr.error('Failed users deleting!', 'Oops!'));
         } else if ($event.type === 'group') {
-          this.rolesService.removeGroupById($event.value).subscribe(res => {
+          this.rolesService.removeGroupById($event.value).subscribe(() => {
             this.toastr.success('Group was successfully deleted!', 'Success!');
             this.getGroupsData();
           }, (error) => this.toastr.error(error.message, 'Oops!'));
         }
         break;
+
       default:
     }
   }
@@ -192,22 +200,21 @@
     }, []);
   }
 
-  public updateGroup(value, extraMesssage){
-    this.rolesService.updateGroup(value).subscribe(res => {
-      this.toastr.success(`Group data successfully updated${extraMesssage}!`, 'Success!');
-      this.getGroupsData();
-    }, () => this.toastr.error('Failed group data updating!', 'Oops!'));
-  }
-
   public updateGroupData(groups) {
-    this.groupsData = groups.map(v => v).sort((a, b) => (a.group > b.group) ? 1 : ((b.group > a.group) ? -1 : 0));
+    this.groupsData = groups.map(v => {
+      if (!v.users) {
+        v.users = [];
+      }
+      return v;
+    }).sort((a, b) => (a.group > b.group) ? 1 : ((b.group > a.group) ? -1 : 0));
     this.groupsData.forEach(item => {
-      item.selected_roles = item.roles.map(role => role.description);
+      item.selected_roles = item.roles.map(role => ({role: role.description, type: role.type, cloud: role.cloud}));
     });
+    this.getGroupsListCopy();
   }
 
   private getGroupsListCopy() {
-    this.startedGroups = this.groupsData.map(env => JSON.parse(JSON.stringify(env)));
+    this.startedGroups = JSON.parse(JSON.stringify(this.groupsData));
   }
 
   public groupValidarion(): ValidatorFn {
@@ -224,8 +231,19 @@
     });
   }
 
-  public compareObjects(o1: any, o2: any): boolean {
-    return o1.toLowerCase() === o2.toLowerCase();
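+  // True when the group's users and selected roles still match the snapshot taken
+  // at load time (startedGroups), i.e. there is nothing to update yet.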
+  private isGroupUnchanged(currGroup) {
+    const currGroupSource = this.startedGroups.filter(cur => cur.group === currGroup.group)[0];
+    if (currGroup.users.length !== currGroupSource.users.length ||
+      currGroup.selected_roles.length !== currGroupSource.selected_roles.length) {
+      return false;
+    }
+    return JSON.stringify(currGroup.users) === JSON.stringify(currGroupSource.users) &&
+      JSON.stringify(currGroup.selected_roles.map(role => role.role).sort()) ===
+      JSON.stringify(currGroupSource.selected_roles.map(role => role.role).sort());
   }
 
   public resetDialog() {
@@ -249,7 +267,23 @@
 
   private getEnvironmentHealthStatus() {
     this.healthStatusService.getEnvironmentHealthStatus()
-      .subscribe((result: any) => this.healthStatus = result);
+      .subscribe((result: any) => {
+        this.healthStatus = result;
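+        // Neither admin nor projectAdmin: this page is off-limits, go home.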
+        if (!this.healthStatus.admin && !this.healthStatus.projectAdmin) {
+          this.appRoutingService.redirectToHomePage();
+        } else {
+          this.openManageRolesDialog();
+        }
+      });
+  }
+
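+  // Selection handler for multi-level-select-dropdown: $event.type carries the group
+  // name when editing an existing group; otherwise the change targets the create stepper.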
+  public onUpdate($event): void {
+    if ($event.type) {
+      this.groupsData.filter(group => group.group === $event.type)[0].selected_roles = $event.model;
+    } else {
+      this.setupRoles = $event.model;
+    }
   }
 }
 
@@ -273,6 +307,7 @@
   `,
   styles: [`.group-name { max-width: 96%; display: inline-block; vertical-align: bottom; }`]
 })
 export class ConfirmDeleteUserAccountDialogComponent {
   constructor(
     public dialogRef: MatDialogRef<ConfirmDeleteUserAccountDialogComponent>,
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
index a8fbdc0..75d2087 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
@@ -47,6 +47,7 @@
   private static readonly IMAGE = 'image';
   private static readonly SCHEDULER = 'scheduler';
   private static readonly TEMPLATES = 'templates';
+  private static readonly COMPUTATION_TEMPLATES = 'computation_templates';
   private static readonly COMPUTATIONAL_RESOURCES_TEMLATES = 'computational_templates';
   private static readonly COMPUTATIONAL_RESOURCES = 'computational_resources';
   private static readonly COMPUTATIONAL_RESOURCES_DATAENGINE = 'computational_resources_dataengine';
@@ -181,6 +182,12 @@
       null);
   }
 
+  public buildGetComputationTemplatesRequest(params, provider): Observable<any> {
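+    // Unlike the generic TEMPLATES endpoint, this one is provider-scoped, so the
+    // cloud provider becomes part of the request path.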
+    return this.buildRequest(HTTPMethod.GET,
+      '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATION_TEMPLATES) + params,
+      null);
+  }
+
   public buildCreateExploratoryEnvironmentRequest(data): Observable<any> {
     return this.buildRequest(HTTPMethod.PUT,
       this.requestRegistry.Item(ApplicationServiceFacade.EXPLORATORY_ENVIRONMENT),
@@ -411,16 +418,6 @@
       null);
   }
 
-  public buildManageEnvironment(action, data): Observable<any> {
-    return this.buildRequest(HTTPMethod.POST,
-      this.requestRegistry.Item(ApplicationServiceFacade.ENV) + action,
-      data,
-      {
-        observe: 'response',
-        headers: { 'Content-Type': 'text/plain' }
-      });
-  }
-
   public buildGetAllEnvironmentData(): Observable<any> {
     return this.buildRequest(HTTPMethod.GET,
       this.requestRegistry.Item(ApplicationServiceFacade.FULL_ACTIVE_LIST),
@@ -559,12 +556,6 @@
       null);
   }
 
-  public buildDeleteProject(param): Observable<any> {
-    return this.buildRequest(HTTPMethod.DELETE,
-      this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + param,
-      null);
-  }
-
   public buildToggleProjectStatus(param, data): Observable<any> {
     return this.buildRequest(HTTPMethod.POST,
       this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + param,
@@ -631,6 +622,8 @@
       '/api/infrastructure_provision/exploratory_environment');
     this.requestRegistry.Add(ApplicationServiceFacade.TEMPLATES,
       '/api/infrastructure_templates');
+    this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATION_TEMPLATES,
+      '/infrastructure_provision/computational_resources');
     this.requestRegistry.Add(ApplicationServiceFacade.IMAGE,
       '/api/infrastructure_provision/exploratory_environment/image');
     this.requestRegistry.Add(ApplicationServiceFacade.SCHEDULER,
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
index fe61c75..5d35eec 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
@@ -28,8 +28,8 @@
 export class DataengineConfigurationService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getClusterConfiguration(exploratory, cluster, provider): Observable<{}> {
-    const url = `/${exploratory}/${cluster}/config`;
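+  // Cluster-configuration endpoints are now project-scoped.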
+  public getClusterConfiguration(project, exploratory, cluster, provider): Observable<{}> {
+    const url = `/${project}/${exploratory}/${cluster}/config`;
     return this.applicationServiceFacade
       .buildGetClusterConfiguration(url, provider)
       .pipe(
@@ -37,8 +37,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public editClusterConfiguration(data, exploratory, cluster, provider): Observable<{}> {
-    const url = `/dataengine/${exploratory}/${cluster}/config`;
+  public editClusterConfiguration(data, project, exploratory, cluster, provider): Observable<{}> {
+    const url = `/dataengine/${project}/${exploratory}/${cluster}/config`;
     return this.applicationServiceFacade
       .buildEditClusterConfiguration(url, data, provider)
       .pipe(
@@ -46,8 +46,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getExploratorySparkConfiguration(exploratory): Observable<{}> {
-    const url = `/${exploratory}/cluster/config`;
+  public getExploratorySparkConfiguration(project, exploratory): Observable<{}> {
+    const url = `/${project}/${exploratory}/cluster/config`;
     return this.applicationServiceFacade
       .buildGetExploratorySparkConfiguration(url)
       .pipe(
@@ -55,8 +55,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public editExploratorySparkConfiguration(data, exploratory): Observable<{}> {
-    const url = `/${exploratory}/reconfigure`;
+  public editExploratorySparkConfiguration(data, project, exploratory): Observable<{}> {
+    const url = `/${project}/${exploratory}/reconfigure`;
     return this.applicationServiceFacade
       .buildEditExploratorySparkConfiguration(url, data)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
index d593d08..12086bc 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/healthStatus.service.ts
@@ -119,7 +119,7 @@
               this.appRoutingService.redirectToHomePage();
               return false;
             }
-            if (parameter === 'administration' && !data.admin) {
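+            // projectAdmin users may open the administration section as well.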
+            if (parameter === 'administration' && !data.admin && !data.projectAdmin) {
               this.appRoutingService.redirectToNoAccessPage();
               return false;
             }
@@ -136,15 +136,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public manageEnvironment(act, data): Observable<Response | {}> {
-    const action = `/${act}`;
-    return this.applicationServiceFacade
-      .buildManageEnvironment(action, data)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public getSsnMonitorData(): Observable<{}> {
     return this.applicationServiceFacade
       .buildGetSsnMonitorData()
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
index d4f8942..2119b1a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
@@ -28,8 +28,8 @@
 export class LibrariesInstallationService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getGroupsList(exploratory, computational?): Observable<Response> {
-    let body = `?exploratory_name=${exploratory}`;
+  public getGroupsList(project, exploratory, computational?): Observable<Response> {
+    let body = `?project_name=${project}&exploratory_name=${exploratory}`;
     if (computational) body += `&computational_name=${computational}`;
 
     return this.applicationServiceFacade
@@ -65,8 +65,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getInstalledLibrariesList(exploratory): Observable<{}> {
-    const body = `?exploratory_name=${exploratory}`;
+  public getInstalledLibrariesList(project, exploratory): Observable<{}> {
+    const body = `?project_name=${project}&exploratory_name=${exploratory}`;
 
     return this.applicationServiceFacade
       .buildGetInstalledLibrariesList(body)
@@ -75,8 +75,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getInstalledLibsByResource(exploratory, computational?): Observable<{}> {
-    let body = `?exploratory_name=${exploratory}`;
+  public getInstalledLibsByResource(project, exploratory, computational?): Observable<{}> {
+    let body = `?project_name=${project}&exploratory_name=${exploratory}`;
     if (computational) body += `&computational_name=${computational}`;
 
     return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
index 4b759b6..2d21fd5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
@@ -36,8 +36,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  environmentManagement(data, action: string, resource: string, computational?: string): Observable<{}> {
-    const params = computational ? `/${action}/${resource}/${computational}` : `/${action}/${resource}`;
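+  // Management URLs are now project-scoped: /{action}/{project}/{resource}[/{computational}].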
+  environmentManagement(data, action: string, project: string, resource: string, computational?: string): Observable<{}> {
+    const params = computational ? `/${action}/${project}/${resource}/${computational}` : `/${action}/${project}/${resource}`;
     return this.applicationServiceFacade
       .buildEnvironmentManagement(params, data)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
index 805c8e8..ccf93f8 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
@@ -52,15 +52,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getProjectsManagingList(): Observable<{}> {
-    const params = '/managing';
-    return this.applicationServiceFacade
-      .buildGetUserProjectsList(params)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public getUserProjectsList(isActive?): Observable<{}> {
     const params = isActive ? '/me?active=true' : '';
     return this.applicationServiceFacade
@@ -70,15 +61,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public deleteProject(data): Observable<{}> {
-    const url = `/${data}`;
-    return this.applicationServiceFacade
-      .buildDeleteProject(url)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public toggleProjectStatus(data, action): Observable<{}> {
     const url = `/${action}`;
     return this.applicationServiceFacade
@@ -88,15 +70,6 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public stopProjectAction(data): Observable<{}> {
-    const url = `/managing/stop/${data}`;
-    return this.applicationServiceFacade
-      .buildToggleProjectStatus(url, data)
-      .pipe(
-        map(response => response),
-        catchError(ErrorUtils.handleServiceError));
-  }
-
   public updateProjectsBudget(data): Observable<{}> {
     const url = '/budget';
     return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
index c595486..a854305 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
@@ -29,8 +29,8 @@
 export class SchedulerService {
   constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
 
-  public getExploratorySchedule(notebook, resource?): Observable<{}> {
-    const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
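+    // Schedule endpoints are now project-scoped: /{project}/{notebook}[/{resource}].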
+  public getExploratorySchedule(project, notebook, resource?): Observable<{}> {
+    const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
     return this.applicationServiceFacade
       .buildGetExploratorySchedule(param)
       .pipe(
@@ -38,8 +38,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public setExploratorySchedule(notebook, data, resource?): Observable<ScheduleSchema> {
-    const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
+  public setExploratorySchedule(project, notebook, data, resource?): Observable<ScheduleSchema> {
+    const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
     return this.applicationServiceFacade
       .buildSetExploratorySchedule(param, data)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
index a41bd29..6f7c254 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
@@ -37,10 +37,10 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public getComputationalTemplates(project, endpoint): Observable<any> {
-    const url = `/${project}/${endpoint}/computational_templates`;
+  public getComputationalTemplates(project, endpoint, provider): Observable<any> {
+    const url = `/${project}/${endpoint}/templates`;
     return this.applicationServiceFacade
-      .buildGetTemplatesRequest(url)
+      .buildGetComputationTemplatesRequest(url, provider)
       .pipe(
         map(response => response),
         catchError(ErrorUtils.handleServiceError));
@@ -81,7 +81,7 @@
   }
 
   public suspendExploratoryEnvironment(notebook: any, action): Observable<{}> {
-    const url = '/' + notebook.name + '/' + action;
+    const url = '/' + notebook.project + '/' + notebook.name + '/' + action;
 
     return this.applicationServiceFacade
       .buildSuspendExploratoryEnvironmentRequest(JSON.stringify(url))
@@ -108,8 +108,8 @@
         catchError(ErrorUtils.handleServiceError));
   }
 
-  public suspendComputationalResource(notebookName: string, computationalResourceName: string, provider: string): Observable<{}> {
-    const body = JSON.stringify('/' + notebookName + '/' + computationalResourceName + '/terminate');
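+    // The terminate URL is now project-scoped: /{project}/{notebook}/{resource}/terminate.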
+  public suspendComputationalResource(projectName: string, notebookName: string, computationalResourceName: string, provider: string): Observable<{}> {
+    const body = JSON.stringify('/' + projectName + '/' + notebookName + '/' + computationalResourceName + '/terminate');
     return this.applicationServiceFacade
       .buildDeleteComputationalResourcesRequest(body, provider)
       .pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
index 0fb0fde..0eb86a4 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
@@ -22,7 +22,7 @@
 
     <ng-container matColumnDef="name">
       <th mat-header-cell *matHeaderCellDef class="env_name">
-        <span class="label"> Environment name </span>
+        <div class="label"><span class="text"> Environment name</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.dlab_id.length > 0; else dlab_id_filtered">filter_list</span>
@@ -36,7 +36,13 @@
 
     <ng-container matColumnDef="user">
       <th mat-header-cell *matHeaderCellDef class="th_user">
-        <span class="label"> User </span>
+        <div class="sort">
+          <div class="sort-arrow up" (click)="sortBy('user', 'down')" [ngClass]="{'active': !!this.active['userdown']}"></div>
+          <div class="sort-arrow down" (click)="sortBy('user', 'up')" [ngClass]="{'active': !!this.active['userup']}"></div>
+        </div>
+        <div class="label">
+          <span class="text"> User </span>
+        </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.users.length > 0; else user_filtered">filter_list</span>
@@ -50,7 +56,11 @@
 
     <ng-container matColumnDef="project">
       <th mat-header-cell *matHeaderCellDef class="th_project">
-        <span class="label">Project</span>
+        <div class="sort">
+          <div class="sort-arrow up" (click)="sortBy('project', 'down')" [ngClass]="{'active': !!this.active['projectdown']}"></div>
+          <div class="sort-arrow down" (click)="sortBy('project', 'up')" [ngClass]="{'active': !!this.active['projectup']}"></div>
+        </div>
+        <div class="label"><span class="text">Project</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.projects.length > 0; else project_filtered">filter_list</span>
@@ -64,7 +74,7 @@
 
     <ng-container matColumnDef="type">
       <th mat-header-cell *matHeaderCellDef class="th_type">
-        <span class="label"> Resource Type </span>
+        <div class="label"><span class="text"> Resource Type</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.resource_type.length > 0; else type_filtered">filter_list</span>
@@ -78,7 +88,7 @@
 
     <ng-container matColumnDef="status">
       <th mat-header-cell *matHeaderCellDef class="th_status">
-        <span class="label"> Status </span>
+        <div class="label"><span class="text"> Status</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span *ngIf="filteredReportData.statuses.length > 0; else status_filtered">filter_list</span>
@@ -96,7 +106,7 @@
 
     <ng-container matColumnDef="shape">
       <th mat-header-cell *matHeaderCellDef class="th_shape">
-        <span class="label"> Instance size</span>
+        <div class="label"><span class="text"> Instance size</span></div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span
@@ -113,7 +123,7 @@
 
     <ng-container matColumnDef="service">
       <th mat-header-cell *matHeaderCellDef class="service">
-        <span class="label"> Product </span>
+        <div class="label"><span class="text"> Product</span> </div>
         <button mat-icon-button aria-label="More" class="ar" (click)="toggleFilterRow()">
           <i class="material-icons">
             <span
@@ -131,7 +141,13 @@
 
     <ng-container matColumnDef="charge" stickyEnd>
       <th mat-header-cell *matHeaderCellDef class="th_charges">
-        <span class="label"> Service Charges </span>
+        <div class="label">
+          <div class="sort">
+            <div class="sort-arrow up" (click)="sortBy('cost', 'down')" [ngClass]="{'active': !!this.active['costdown']}"></div>
+            <div class="sort-arrow down" (click)="sortBy('cost', 'up')" [ngClass]="{'active': !!this.active['costup']}"></div>
+          </div>
+          <span class="text">Service Charges</span>
+        </div>
       </th>
 
       <td mat-cell *matCellDef="let element">
@@ -211,7 +227,7 @@
 
     <tr mat-header-row *matHeaderRowDef="displayedColumns; sticky: true" class="header-row"></tr>
 
-    <tr [hidden]="!collapseFilterRow || !PROVIDER" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
+    <tr [hidden]="!collapseFilterRow" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
       class="filter-row"></tr>
     <tr mat-row *matRowDef="let row; columns: displayedColumns;" class="content-row"></tr>
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
index bff6f6a..1e8082b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
@@ -120,6 +120,7 @@
     min-width: 140px;
     padding-right: 15px;
     text-align: right;
+
     .label {
       padding-top: 0;
     }
@@ -151,19 +152,70 @@
 
   .header-row {
     position: unset;
+
+    .th_charges {
+      padding-top: 0;
+
+      .label {
+        padding-top: 12px;
+      }
+    }
+
     .label {
       display: inline-block;
-      padding-top: 15px;
+      padding-top: 13px;
       vertical-align: super !important;
-      padding-left: 15px;
+
+      .text {
+        padding-left: 15px;
+      }
+    }
+
+    .sort {
+      position: absolute;
+      bottom: 20px;
+
+      &-arrow {
+        width: 6px;
+        height: 6px;
+        border: 3px solid transparent;
+        border-bottom: 3px solid rgba(0, 0, 0, .54);
+        border-left: 3px solid rgba(0, 0, 0, .54);
+        cursor: pointer;
+
+        &.active {
+          border-bottom: 3px solid #35afd5;
+          border-left: 3px solid #35afd5;
+        }
+      }
+
+      .down {
+        transform: rotate(-45deg);
+      }
+
+      .up {
+        transform: rotate(135deg);
+      }
     }
   }
 
+
   .filter-row {
     .actions {
       text-align: right;
     }
   }
+
+  .table-footer {
+    position: sticky;
+    bottom: 0;
+    background: inherit;
+    border-top: 1px solid #E0E0E0;
+    transform: translateY(-1px);
+    border-bottom: none;
+    padding-left: 0 !important;
+  }
 }
 
 .dashboard_table_body {
@@ -187,6 +239,7 @@
   border-top: 1px solid #E0E0E0;
   transform: translateY(-1px);
   border-bottom: none;
+
   &.total-cost{
     min-width: 140px;
     padding-left: 0 !important;
@@ -195,7 +248,6 @@
 
 @media screen and (max-width: 1280px) {
   .dashboard_table.reporting {
-
     .env_name,
     .service,
     .th_type,
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
index 29914e5..3d99814 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.ts
@@ -38,12 +38,12 @@
   reportData: Array<any> = [];
   fullReport: Array<any>;
   isFiltered: boolean = false;
+  active: { [key: string]: boolean } = {};
 
   @ViewChild('nameFilter', { static: false }) filter;
 
   @Output() filterReport: EventEmitter<{}> = new EventEmitter();
   @Output() resetRangePicker: EventEmitter<boolean> = new EventEmitter();
-  @Input() PROVIDER: string;
   displayedColumns: string[] = ['name', 'user', 'project', 'type', 'status', 'shape', 'service', 'charge'];
   displayedFilterColumns: string[] = ['name-filter', 'user-filter', 'project-filter', 'type-filter', 'status-filter', 'shape-filter', 'service-filter', 'actions'];
 
@@ -65,6 +65,25 @@
     }
   }
 
+  sortBy(sortItem, direction) {
+    let report: Array<object>;
+    if (direction === 'down') {
+      report = this.reportData.sort((a, b) => (a[sortItem] > b[sortItem]) ? 1 : ((b[sortItem] > a[sortItem]) ? -1 : 0));
+    }
+    if (direction === 'up') {
+      report = this.reportData.sort((a, b) => (a[sortItem] < b[sortItem]) ? 1 : ((b[sortItem] < a[sortItem]) ? -1 : 0));
+    }
+    this.refreshData(this.fullReport, report);
+    this.removeSorting();
+    this.active[sortItem + direction] = true;
+  }
+
+  removeSorting() {
+    for (const item in this.active) {
+      this.active[item] = false;
+    }
+  }
+
   toggleFilterRow(): void {
     this.collapseFilterRow = !this.collapseFilterRow;
   }
@@ -76,11 +95,12 @@
   filter_btnClick(): void {
     this.filterReport.emit(this.filteredReportData);
     this.isFiltered = true;
+    this.removeSorting();
   }
 
   resetFiltering(): void {
     this.filteredReportData.defaultConfigurations();
-
+    this.removeSorting();
     this.filter.nativeElement.value = '';
     this.filterReport.emit(this.filteredReportData);
     this.resetRangePicker.emit(true);
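
A minimal standalone sketch of the sorting state added above: the `active` map and the `costdown`/`costup` key scheme follow the diff, while `SortState` and the sample rows are invented for illustration. Each click re-sorts the rows and leaves exactly one direction key active for the template's `[ngClass]` bindings to highlight.

```typescript
// Sketch of the sort-state pattern from reporting-grid.component.ts.
// Note: as in the diff, direction 'down' produces an ascending comparator.
type Row = { [key: string]: any };

class SortState {
  active: { [key: string]: boolean } = {};

  sortBy(rows: Row[], sortItem: string, direction: 'up' | 'down'): Row[] {
    const sign = direction === 'down' ? 1 : -1;
    const sorted = [...rows].sort((a, b) =>
      a[sortItem] > b[sortItem] ? sign : a[sortItem] < b[sortItem] ? -sign : 0);
    this.removeSorting();                     // only one arrow may stay highlighted
    this.active[sortItem + direction] = true;
    return sorted;
  }

  removeSorting(): void {
    for (const key in this.active) { this.active[key] = false; }
  }
}

const state = new SortState();
console.log(state.sortBy([{ cost: 3 }, { cost: 1 }, { cost: 2 }], 'cost', 'down'));
// -> [{ cost: 1 }, { cost: 2 }, { cost: 3 }]
console.log(state.active); // -> { costdown: true }
```
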
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
index 8cfdcd9..3d04931 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
@@ -21,14 +21,14 @@
 import { Component, OnInit, OnDestroy, ViewChild } from '@angular/core';
 import { ToastrService } from 'ngx-toastr';
 
-import {BillingReportService, EndpointService, HealthStatusService} from '../core/services';
+import {BillingReportService, HealthStatusService} from '../core/services';
 import { ReportingGridComponent } from './reporting-grid/reporting-grid.component';
 import { ToolbarComponent } from './toolbar/toolbar.component';
 
 import { FileUtils } from '../core/util';
 import { DICTIONARY, ReportingConfigModel } from '../../dictionary/global.dictionary';
 import {ProgressBarService} from '../core/services/progress-bar.service';
-import {logger} from 'codelyzer/util/logger';
+
 
 @Component({
   selector: 'dlab-reporting',
@@ -39,15 +39,15 @@
                   (setRangeOption)="setRangeOption($event)">
     </dlab-toolbar>
     <mat-divider></mat-divider>
-    <dlab-reporting-grid [PROVIDER]="PROVIDER" (filterReport)="filterReport($event)" (resetRangePicker)="resetRangePicker()"></dlab-reporting-grid>
+    <dlab-reporting-grid (filterReport)="filterReport($event)" (resetRangePicker)="resetRangePicker()"></dlab-reporting-grid>
   </div>
 
   `,
   styles: [`
     footer {
       position: fixed;
-      left: 0px;
-      bottom: 0px;
+      left: 0;
+      bottom: 0;
       width: 100%;
       background: #a1b7d1;
       color: #ffffff;
@@ -69,18 +69,15 @@
   data: any;
   billingEnabled: boolean;
   admin: boolean;
-  public PROVIDER: string;
 
   constructor(
     private billingReportService: BillingReportService,
     private healthStatusService: HealthStatusService,
     public toastr: ToastrService,
     private progressBarService: ProgressBarService,
-    private endpointService: EndpointService,
   ) { }
 
   ngOnInit() {
-
     this.getEnvironmentHealthStatus();
   }
 
@@ -88,38 +85,13 @@
     this.clearStorage();
   }
 
-  getBillingProvider() {
-    if (this.admin) {
-      this.endpointService.getEndpointsData().subscribe(list => {
-        const endpoints = JSON.parse(JSON.stringify(list));
-        const localEndpoint = endpoints.filter(endpoint => endpoint.name === 'local');
-        if (localEndpoint.length) {
-          this.PROVIDER = localEndpoint[0].cloudProvider.toLowerCase();
-          if (this.PROVIDER) {
-            this.rebuildBillingReport();
-          }
-        }
-      }, e => {
-        this.PROVIDER = 'azure';
-        if (this.PROVIDER) {
-          this.rebuildBillingReport();
-        }
-      }) ;
-    } else {
-      this.PROVIDER = 'azure';
-      if (this.PROVIDER) {
-        this.rebuildBillingReport();
-      }
-    }
-  }
-
   getGeneralBillingData() {
     setTimeout(() => {this.progressBarService.startProgressBar(); } , 0);
     this.billingReportService.getGeneralBillingData(this.reportData)
       .subscribe(data => {
         this.data = data;
         this.reportingGrid.refreshData(this.data, this.data.report_lines);
-        this.reportingGrid.setFullReport(this.data.full_report);
+        this.reportingGrid.setFullReport(this.data.is_full);
 
         this.reportingToolbar.reportData = this.data;
         if (!localStorage.getItem('report_period')) {
@@ -140,13 +112,11 @@
       }, () => this.progressBarService.stopProgressBar());
   }
 
-  rebuildBillingReport($event?): void {
-    if (this.PROVIDER) {
-      this.clearStorage();
-      this.resetRangePicker();
-      this.reportData.defaultConfigurations();
-      this.getGeneralBillingData();
-    }
+  rebuildBillingReport(): void {
+    this.clearStorage();
+    this.resetRangePicker();
+    this.reportData.defaultConfigurations();
+    this.getGeneralBillingData();
   }
 
   exportBillingReport(): void {
@@ -229,7 +199,7 @@
       .subscribe((result: any) => {
         this.billingEnabled = result.billingEnabled;
         this.admin = result.admin;
-        this.getBillingProvider();
+        this.rebuildBillingReport();
       });
   }
 }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
index 75ea01e..11002e1 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
@@ -91,14 +91,14 @@
 
   public getClusterConfiguration(): void {
     this.dataengineConfigurationService
-      .getClusterConfiguration(this.environment.name, this.resource.computational_name, this.PROVIDER)
+      .getClusterConfiguration(this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER)
       .subscribe((result: any) => this.config = result,
         error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
   }
 
   public editClusterConfiguration(data): void {
     this.dataengineConfigurationService
-      .editClusterConfiguration(data.configuration_parameters, this.environment.name, this.resource.computational_name, this.PROVIDER)
+      .editClusterConfiguration(data.configuration_parameters, this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER)
       .subscribe(result => {
         this.dialogRef.close();
       },
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
index aab4e24..ec3c3ac 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
@@ -69,15 +69,16 @@
               <label class="label">Cluster alias</label>
               <div class="control">
                 <input
-                  [class.danger_field]="computationalResourceExist || !resourceForm?.controls['cluster_alias_name'].valid
+                  [class.danger_field]="!resourceForm?.controls['cluster_alias_name'].valid
                         && resourceForm?.controls['cluster_alias_name'].dirty && resourceForm?.controls['cluster_alias_name'].hasError('duplication')"
                   type="text" class="form-control" placeholder="Enter cluster alias"
                   formControlName="cluster_alias_name" />
-                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('duplication')">This
-                  cluster name already exists.</span>
+                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')">You have cluster with this name in current project.</span>
+                <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">Other user has cluster with this name in current project.</span>
                 <span class="error" *ngIf="!resourceForm?.controls.cluster_alias_name.valid
                                             && resourceForm?.controls['cluster_alias_name'].dirty
-                                            && !resourceForm?.controls['cluster_alias_name'].hasError('duplication')">
+                                            && !resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')
+                                            && !resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">
                   Cluster name cannot be longer than {{DICTIONARY[PROVIDER].max_cluster_name_length}} characters
                   and can only contain letters, numbers, hyphens and '_' but can not end with special
                   characters
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
index 29ef7d4..0c867f9 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
@@ -44,6 +44,8 @@
   notebook_instance: any;
   resourcesList: any;
   clusterTypes = [];
+  userComputations = [];
+  projectComputations = [];
   selectedImage: any;
   spotInstance: boolean = true;
 
@@ -76,7 +78,7 @@
     this.notebook_instance = this.data.notebook;
     this.resourcesList = this.data.full_list;
     this.initFormModel();
-    this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint);
+    this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint, this.notebook_instance.cloud_provider);
   }
 
   public selectImage($event) {
@@ -229,16 +231,24 @@
   }
 
   private checkDuplication(control) {
-    if (this.containsComputationalResource(control.value))
-      return { duplication: true };
+    if (this.containsComputationalResource(control.value, this.userComputations)) {
+      return { 'user-duplication': true };
+    }
+
+    if (this.containsComputationalResource(control.value, this.projectComputations)) {
+      return { 'other-user-duplication': true };
+    }
   }
 
-  private getTemplates(project, endpoint) {
-    this.userResourceService.getComputationalTemplates(project, endpoint).subscribe(
+  private getTemplates(project, endpoint, provider) {
+    this.userResourceService.getComputationalTemplates(project, endpoint, provider).subscribe(
       clusterTypes => {
-        this.clusterTypes = clusterTypes;
+        this.clusterTypes = clusterTypes.templates;
+        this.userComputations = clusterTypes.user_computations;
+        this.projectComputations = clusterTypes.project_computations;
+
         this.clusterTypes.forEach((cluster, index) => this.clusterTypes[index].computation_resources_shapes = SortUtils.shapesSort(cluster.computation_resources_shapes));
-        this.selectedImage = clusterTypes[0];
+        this.selectedImage = clusterTypes.templates[0];
 
         if (this.selectedImage) {
           this._ref.detectChanges();
@@ -284,10 +294,10 @@
     return filtered;
   }
 
-  private containsComputationalResource(conputational_resource_name: string): boolean {
-    if (conputational_resource_name) {
-      return this.notebook_instance.resources.some(resource =>
-        CheckUtils.delimitersFiltering(conputational_resource_name) === CheckUtils.delimitersFiltering(resource.computational_name));
+  private containsComputationalResource(computational_resource_name: string, existNames: Array<string>): boolean {
+    if (computational_resource_name) {
+      return existNames.some(resource =>
+        CheckUtils.delimitersFiltering(computational_resource_name) === CheckUtils.delimitersFiltering(resource));
     }
   }
 }
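
Reduced to plain functions, the validator above distinguishes two error keys that the template consumes through separate `*ngIf` messages. The key names and the `userComputations` / `projectComputations` split follow the diff; the sample data is invented. The patched validator returns nothing when no duplicate is found, which Angular treats the same as an explicit `null`.

```typescript
// Sketch of the two-key duplication check from checkDuplication().
const userComputations = ['spark-1'];    // clusters owned by the current user
const projectComputations = ['emr-7'];   // clusters owned by other project members

function checkDuplication(value: string): { [key: string]: boolean } | null {
  if (userComputations.includes(value)) { return { 'user-duplication': true }; }
  if (projectComputations.includes(value)) { return { 'other-user-duplication': true }; }
  return null; // no error: the control is valid
}

console.log(checkDuplication('spark-1'));  // -> { 'user-duplication': true }
console.log(checkDuplication('emr-7'));    // -> { 'other-user-duplication': true }
console.log(checkDuplication('new-one'));  // -> null
```
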
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
index 9314137..a4b825e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
@@ -110,20 +110,21 @@
    }
  }
  @media screen and (max-width: 1520px) {
-   .source {
+   .resources,
+   .managment {
+     .source {
+       .resource-wrap {
+         .resource-name {
+           width: 45%;
+         }
 
-     .resource-wrap {
+         .resource-status {
+           width: 40%;
+         }
 
-       .resource-name {
-         width: 45%;
-       }
-
-       .resource-status {
-         width: 40%;
-       }
-
-       .resource-actions {
-         width: 15%;
+         .resource-actions {
+           width: 15%;
+         }
        }
      }
    }
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
index f7bfa1d..7bc5126 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
@@ -60,7 +60,7 @@
             });
         } else if (result && action === 'terminate') {
           this.userResourceService
-            .suspendComputationalResource(this.environment.name, resource.computational_name, this.environment.cloud_provider)
+            .suspendComputationalResource(this.environment.project, this.environment.name, resource.computational_name, this.environment.cloud_provider)
             .subscribe(() => {
               this.rebuildGrid();
             });
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
index 1e7c25c..4a7a4a6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
@@ -33,9 +33,7 @@
               image name already exists in project.</span>
             <span class="error"
               *ngIf="!createAMIForm.valid && createAMIForm.controls['name'].dirty && !createAMIForm.controls['name'].hasError('duplication')">
-              image name
-              <span *ngIf="provider === 'azure'"> cannot be longer than 10 characters and</span>
-              can only contain letters, numbers, hyphens and '_'</span>
+              Name cannot be longer than 10 characters and can only contain letters, numbers, hyphens and '_' but cannot end with special characters</span>
           </div>
         </div>
         <div class="control-group">
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
index ff17382..0ec266e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
@@ -66,7 +66,8 @@
     this.createAMIForm = this._fb.group({
       name: ['', [Validators.required, Validators.pattern(this.namePattern), this.providerMaxLength, this.checkDuplication.bind(this)]],
       description: [''],
-      exploratory_name: [this.notebook.name]
+      exploratory_name: [this.notebook.name],
+      project_name: [this.notebook.project]
     });
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
index 1fa43ca..db097f3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
@@ -76,7 +76,7 @@
 
   public getClusterConfiguration(): void {
     this.dataengineConfigurationService
-      .getExploratorySparkConfiguration(this.notebook.name)
+      .getExploratorySparkConfiguration(this.notebook.project, this.notebook.name)
       .subscribe(
         (result: any) => this.config = result,
         error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
@@ -95,7 +95,7 @@
 
   public editClusterConfiguration(data): void {
     this.dataengineConfigurationService
-      .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.name)
+      .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.project, this.notebook.name)
       .subscribe(result => {
         this.dialogRef.close();
       },
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
index 2f8e28d..8ff7918 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
@@ -456,3 +456,16 @@
     }
   }
 }
+
+@media screen and (max-height: 800px) {
+  .libs-info {
+    height: 50%;
+
+    .mat-list {
+      .scrollingList {
+        max-height: 140px;
+        height: 60%;
+      }
+    }
+  }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
index a4bfe90..ebe8281 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
@@ -105,7 +105,7 @@
   }
 
   uploadLibGroups(): void {
-    this.librariesInstallationService.getGroupsList(this.notebook.name, this.model.computational_name)
+    this.librariesInstallationService.getGroupsList(this.notebook.project, this.notebook.name, this.model.computational_name)
       .subscribe(
         response => {
           this.libsUploadingStatus(response);
@@ -269,7 +269,7 @@
   }
 
   private getInstalledLibsByResource() {
-    this.librariesInstallationService.getInstalledLibsByResource(this.notebook.name, this.model.computational_name)
+    this.librariesInstallationService.getInstalledLibsByResource(this.notebook.project, this.notebook.name, this.model.computational_name)
       .subscribe((data: any) => this.destination.libs = data);
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
index 6d0369e..c47673b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
@@ -56,7 +56,8 @@
   }
 
   public getLibrariesList(group: string, query: string): Observable<{}> {
-    let lib_query: any = {
+    const lib_query: any = {
+      project_name: this.notebook.project,
       exploratory_name: this.notebook.name,
       group: group,
       start_with: query
@@ -75,12 +76,14 @@
 
   public getInstalledLibrariesList(notebook): Observable<{}> {
     return this.librariesInstallationService.getInstalledLibrariesList(
-      notebook.name
+      notebook.project, notebook.name
     );
   }
 
   private installLibraries(retry?: Library, item?): Observable<{}> {
-    let lib_list: any = {
+    const lib_list: any = {
+      project_name: this.notebook.project,
       exploratory_name: this.notebook.name,
       libs: retry ? retry : this.selectedLibs
     };
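
The request payloads in this model are now scoped by `project_name` as well as `exploratory_name`. A self-contained sketch of the shape (field names from the diff; the notebook object and the group/query values are invented):

```typescript
interface LibQuery {
  project_name: string;
  exploratory_name: string;
  group: string;
  start_with: string;
}

const notebook = { project: 'proj-a', name: 'nb-1' }; // hypothetical notebook

const lib_query: LibQuery = {
  project_name: notebook.project,
  exploratory_name: notebook.name,
  group: 'pip3',
  start_with: 'numpy'
};

console.log(JSON.stringify(lib_query));
// -> {"project_name":"proj-a","exploratory_name":"nb-1","group":"pip3","start_with":"numpy"}
```
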
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
index 6f05128..965f9d8 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
@@ -93,7 +93,9 @@
       </th>
     </ng-container>
     <ng-container matColumnDef="actions" stickyEnd>
-      <th mat-header-cell *matHeaderCellDef class="actions-col"></th>
+      <th mat-header-cell *matHeaderCellDef class="actions-col">
+        <span class="label"> Actions </span>
+      </th>
     </ng-container>
 
     <!-- ----------------------------------------------------- -->
@@ -166,10 +168,10 @@
                     </div>
                   </li>
                   <li *ngIf="element.status.toLowerCase() === 'stopped' || element.status.toLowerCase() === 'stopping'"
-                    matTooltip="Unable to run notebook until it will be stopped" matTooltipPosition="above"
-                    [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping'">
+                    matTooltip="{{isEdgeNodeStopped(element) ? 'Unable to run notebook if edge node is stopped.' : 'Unable to run notebook until it will be stopped.'}}" matTooltipPosition="above"
+                    [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping' && !isEdgeNodeStopped(element)">
                     <div (click)="exploratoryAction(element, 'run')"
-                      [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' }">
+                      [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' || isEdgeNodeStopped(element) }">
                       <i class="material-icons">play_circle_outline</i>
                       <span>Run</span>
                     </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
index 57abbc1..e09ff3e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
@@ -95,11 +95,17 @@
     padding-right: 5px;
     padding-left: 24px;
     background-color: inherit;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .status-col,
   .shape-col {
     width: 14%;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .tag-col {
@@ -120,6 +126,9 @@
 
   .resources-col {
     width: 28%;
+    .label {
+      padding-top: 14px;
+    }
   }
 
   .cost-col {
@@ -132,6 +141,9 @@
     padding-right: 24px;
     text-align: right;
     background-color: inherit;
+    .label {
+      padding-right: 5px;
+    }
   }
 }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
index ef8afa4..3052d84 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
@@ -18,7 +18,7 @@
  */
 /* tslint:disable:no-empty */
 
-import { Component, OnInit } from '@angular/core';
+import {Component, Input, OnInit} from '@angular/core';
 import { animate, state, style, transition, trigger } from '@angular/animations';
 import { ToastrService } from 'ngx-toastr';
 import { MatDialog } from '@angular/material/dialog';
@@ -62,6 +62,8 @@
 export class ResourcesGridComponent implements OnInit {
   readonly DICTIONARY = DICTIONARY;
 
+  @Input() projects: Array<any>;
+
   environments: Exploratory[];
 
   collapseFilterRow: boolean = false;
@@ -140,7 +142,7 @@
 
 
   public isResourcesInProgress(notebook) {
-    const env = this.getResourceByName(notebook.name);
+    const env = this.getResourceByName(notebook.name, notebook.project);
 
     if (env && env.resources.length) {
       return env.resources.filter(item => (item.status !== 'failed' && item.status !== 'terminated'
@@ -149,6 +151,12 @@
     return false;
   }
 
+  public isEdgeNodeStopped(resource) {
+    const currProject = this.projects.filter(proj => proj.name === resource.project);
+    const currEdgeNodeStatus = currProject[0].endpoints.filter(node => node.name === resource.endpoint)[0].status;
+    return currEdgeNodeStatus === 'STOPPED' || currEdgeNodeStatus === 'STOPPING';
+  }
+
   public filterActiveInstances(): FilterConfigurationModel {
     return (<FilterConfigurationModel | any>Object).assign({}, this.filterConfiguration, {
       statuses: SortUtils.activeStatuses(),
@@ -175,7 +183,7 @@
   }
 
   public exploratoryAction(data, action: string) {
-    const resource = this.getResourceByName(data.name);
+    const resource = this.getResourceByName(data.name, data.project);
 
     if (action === 'deploy') {
       this.dialog.open(ComputationalResourceCreateDialogComponent, { data: { notebook: resource, full_list: this.environments }, panelClass: 'modal-xxl' })
@@ -208,8 +216,8 @@
 
 
   // PRIVATE
-  private getResourceByName(notebook_name: string) {
-    return this.getEnvironmentsListCopy()
+  private getResourceByName(notebook_name: string, project_name: string) {
+    return this.getEnvironmentsListCopy().filter(environments => environments.project === project_name)
       .map(env => env.exploratory.find(({ name }) => name === notebook_name))
       .filter(name => !!name)[0];
   }
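
Notebook names are only unique within a project after this change, so the lookup needs both keys. A self-contained sketch of the `getResourceByName` logic, with the shapes reduced to the fields the diff touches and invented sample environments:

```typescript
interface Exploratory { name: string; }
interface Environment { project: string; exploratory: Exploratory[]; }

function getResourceByName(envs: Environment[], notebook_name: string, project_name: string): Exploratory | undefined {
  return envs
    .filter(env => env.project === project_name)      // narrow to the right project first
    .map(env => env.exploratory.find(({ name }) => name === notebook_name))
    .filter(found => !!found)[0];
}

const envs: Environment[] = [
  { project: 'a', exploratory: [{ name: 'nb' }] },
  { project: 'b', exploratory: [{ name: 'nb' }] }
];
console.log(getResourceByName(envs, 'nb', 'b')); // the copy from project 'b', not 'a'
```
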
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
index b705c38..091ccb7 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
@@ -63,5 +63,5 @@
     </div>
   </div>
   <mat-divider></mat-divider>
-  <resources-grid></resources-grid>
+  <resources-grid [projects] = "projects"></resources-grid>
 </div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
index 20b8ce5..0df2a59 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
@@ -88,7 +88,7 @@
 
   public open(notebook, type, resource?): void {
     this.notebook = notebook;
     this.zones = _moment.tz.names()
       .map(item => [_moment.tz(item).format('Z'), item])
       .sort()
@@ -116,11 +116,11 @@
 
         if (this.destination.type === 'СOMPUTATIONAL') {
           this.allowInheritView = true;
-          this.getExploratorySchedule(this.notebook.name, this.destination.computational_name);
+          this.getExploratorySchedule(this.notebook.project, this.notebook.name, this.destination.computational_name);
           this.checkParentInherit();
         } else if (this.destination.type === 'EXPLORATORY') {
           this.allowInheritView = this.checkIsActiveSpark();
-          this.getExploratorySchedule(this.notebook.name);
+          this.getExploratorySchedule(this.notebook.project, this.notebook.name);
         }
       },
       this.schedulerService
@@ -139,7 +139,7 @@
     this.inherit = $event.checked;
 
     if (this.destination.type === 'СOMPUTATIONAL' && this.inherit) {
-      this.getExploratorySchedule(this.notebook.name);
+      this.getExploratorySchedule(this.notebook.project, this.notebook.name);
       this.schedulerForm.get('startDate').disable();
     } else {
       this.schedulerForm.get('startDate').enable();
@@ -248,18 +248,19 @@
     };
 
     if (this.destination.type === 'СOMPUTATIONAL') {
-      this.model.confirmAction(this.notebook.name, parameters, this.destination.computational_name);
+      this.model.confirmAction(this.notebook.project, this.notebook.name, parameters, this.destination.computational_name);
     } else {
       parameters['consider_inactivity'] = this.considerInactivity;
-      this.model.confirmAction(this.notebook.name, parameters);
+      this.model.confirmAction(this.notebook.project, this.notebook.name, parameters);
     }
   }
 
   private setScheduleByInactivity() {
     const data = { sync_start_required: this.parentInherit, check_inactivity_required: this.enableIdleTime, max_inactivity: this.schedulerForm.controls.inactivityTime.value };
     (this.destination.type === 'СOMPUTATIONAL')
-      ? this.setInactivity(this.notebook.name, data, this.destination.computational_name)
-      : this.setInactivity(this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
+      ? this.setInactivity(this.notebook.project, this.notebook.name, data, this.destination.computational_name)
+      : this.setInactivity(this.notebook.project, this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
   }
 
   private formInit(start?: string, end?: string, terminate?: string) {
@@ -272,8 +273,8 @@
     });
   }
 
-  private getExploratorySchedule(resource, resource2?) {
-    this.schedulerService.getExploratorySchedule(resource, resource2).subscribe(
+  private getExploratorySchedule(project, resource, resource2?) {
+    this.schedulerService.getExploratorySchedule(project, resource, resource2).subscribe(
       (params: ScheduleSchema) => {
         if (params) {
           params.start_days_repeat.filter(key => (this.selectedStartWeekDays[key.toLowerCase()] = true));
@@ -302,7 +303,7 @@
   }
 
   private checkParentInherit() {
-    this.schedulerService.getExploratorySchedule(this.notebook.name)
+    this.schedulerService.getExploratorySchedule(this.notebook.project, this.notebook.name)
       .subscribe((res: any) => this.parentInherit = res.sync_start_required);
   }
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
index f83be29..c0093ee 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
@@ -61,13 +61,13 @@
     if (this.continueWith) this.continueWith();
   }
 
-  private scheduleInstance(notebook, params, resourse) {
-    return this.schedulerService.setExploratorySchedule(notebook, params, resourse);
+  private scheduleInstance(project, notebook, params, resourse) {
+    return this.schedulerService.setExploratorySchedule(project, notebook, params, resourse);
   }
 
   public setInactivityTime(params) {
-    const [notebook, data, resource] = params;
-    return this.scheduleInstance(notebook, data, resource);
+    const [project, notebook, data, resource] = params;
+    return this.scheduleInstance(project, notebook, data, resource);
   }
 
   public resetSchedule(notebook, resourse) {
@@ -75,8 +75,8 @@
   }
 
   private prepareModel(fnProcessResults: any, fnProcessErrors: any): void {
-    this.confirmAction = (notebook, data, resourse?) =>
-      this.scheduleInstance(notebook, data, resourse).subscribe(
+    this.confirmAction = (project, notebook, data, resourse?) =>
+      this.scheduleInstance(project, notebook, data, resourse).subscribe(
         response => fnProcessResults(response),
         error => fnProcessErrors(error)
       );
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
index bac0dd6..4ea5a14 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
@@ -25,6 +25,7 @@
 import { DirectivesModule } from '../../core/directives';
 import { KeysPipeModule, UnderscorelessPipeModule } from '../../core/pipes';
 import { BubbleModule } from '..';
+import {MultiLevelSelectDropdownComponent} from './multi-level-select-dropdown/multi-level-select-dropdown.component';
 
 export * from './multi-select-dropdown/multi-select-dropdown.component';
 export * from './dropdown-list/dropdown-list.component';
@@ -37,7 +38,7 @@
     UnderscorelessPipeModule,
     BubbleModule
   ],
-  declarations: [DropdownListComponent, MultiSelectDropdownComponent],
-  exports: [DropdownListComponent, MultiSelectDropdownComponent]
+  declarations: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent],
+  exports: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent]
 })
 export class FormControlsModule {}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
new file mode 100644
index 0000000..4e41606
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
@@ -0,0 +1,94 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+  -->
+
+<div class="dropdown-multiselect btn-group" ngClass="{{type || ''}}">
+  <button type="button" #list (click)="multiactions.toggle($event, list)">
+    <span class="ellipsis" *ngIf="model.length === 0">Select roles</span>
+    <span class="selected-items ellipsis" *ngIf="model.length !== 0">
+      {{selectedRolesList()}}
+    </span>
+    <span class="caret-btn"><i class="material-icons">keyboard_arrow_down</i></span>
+  </button>
+
+  <bubble-up #multiactions position="bottom" [keep-open]="true" class="mt-5">
+    <ul class="list-menu" id="scrolling">
+      <li class="filter-actions">
+        <a class="select_all" (click)="selectAllOptions($event)">
+          <i class="material-icons">playlist_add_check</i>&nbsp;All
+        </a>
+        <a class="deselect_all" (click)="deselectAllOptions($event)">
+          <i class="material-icons">clear</i>&nbsp;None
+        </a>
+      </li>
+
+        <ng-template  ngFor let-item [ngForOf]="items" let-i="index">
+          <li class="role-label" role="presentation" *ngIf="i === 0 || model && item.type !== items[i - 1].type" (click)="toggleItemsForLable(item.type, $event)">
+            <a href="#" class="list-item" role="menuitem">
+              <span class="arrow" [ngClass]="{'rotate-arrow': isOpenCategory[item.type], 'arrow-checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}"></span>
+              <span class="empty-checkbox" [ngClass]="{'checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}" (click)="toggleselectedCategory($event, model, item.type);$event.stopPropagation()" >
+                <span class="checked-checkbox" *ngIf="selectedAllInCattegory(item.type)"></span>
+                <span class="line-checkbox" *ngIf="selectedSomeInCattegory(item.type)"></span>
+              </span>
+              {{labels[item.type] || item.type | titlecase}}
+            </a>
+          </li>
+
+          <li class="role-item" role="presentation" *ngIf="model && isOpenCategory[item.type] && item.type !== 'COMPUTATIONAL_SHAPE' && item.type !== 'NOTEBOOK_SHAPE'" >
+            <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+              <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+                <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+              </span>
+              {{item.role}}
+            </a>
+          </li>
+          <li class="role-item" role="presentation" (click)="toggleItemsForCloud(item.type + item.cloud, $event)"
+              *ngIf="model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.cloud !== items[i - 1].cloud
+              || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.type !== items[i - 1].type
+              || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.cloud !== items[i - 1].cloud
+              || model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.type !== items[i - 1].type"
+          >
+            <a href="#" class="list-item" role="menuitem">
+              <span class="arrow" [ngClass]="{'rotate-arrow': isCloudOpen[item.type + item.cloud], 'arrow-checked': selectedAllInCloud(item.type, item.cloud) || selectedSomeInCloud(item.type, item.cloud)}"></span>
+              <span class="empty-checkbox" [ngClass]="{'checked': selectedAllInCloud(item.type, item.cloud) || selectedSomeInCloud(item.type, item.cloud)}" (click)="toggleSelectedCloud($event, model, item.type, item.cloud);$event.stopPropagation()" >
+                <span class="checked-checkbox" *ngIf="selectedAllInCloud(item.type, item.cloud)"></span>
+                <span class="line-checkbox" *ngIf="selectedSomeInCloud(item.type, item.cloud)"></span>
+              </span>
+              {{item.cloud || 'AWS'}}
+            </a>
+          </li>
+          <li class="role-cloud-item" role="presentation" *ngIf="model && isCloudOpen[item.type + item.cloud] && isOpenCategory[item.type]" >
+            <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+              <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+                <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+              </span>
+              {{item.role}}
+            </a>
+          </li>
+
+        </ng-template>
+
+      <li *ngIf="items?.length == 0">
+        <a role="menuitem" class="list-item">
+          <span class="material-icons">visibility_off</span>
+          No {{type}}
+        </a>
+      </li>
+    </ul>
+  </bubble-up>
+</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
new file mode 100644
index 0000000..5323a24
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
@@ -0,0 +1,321 @@
+/*!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+.dropdown-list,
+.dropdown-multiselect {
+  width: 100%;
+  position: relative;
+}
+
+.dropdown-list button,
+.dropdown-multiselect button {
+  height: 38px;
+  width: 100%;
+  background: #fff;
+  padding-left: 15px;
+  font-size: 14px;
+  text-align: left;
+  white-space: nowrap;
+  cursor: pointer;
+  border-radius: 0;
+  border: none;
+  outline: none;
+  box-shadow: 0 3px 1px -2px rgba(0, 0, 0, .2), 0 2px 2px 0 rgba(0, 0, 0, .14), 0 1px 5px 0 rgba(0, 0, 0, .12);
+}
+
+.dropdown-list {
+  button {
+    line-height: 38px;
+
+    span {
+      color: #4a5c89;
+
+      em {
+        font-size: 13px;
+        color: #35afd5;
+        margin-right: 0;
+        font-style: normal;
+      }
+    }
+  }
+}
+
+.dropdown-list button:active,
+.dropdown-list button:focus,
+.dropdown-multiselect button:active,
+.dropdown-multiselect button:focus {
+  box-shadow: 0 5px 5px -3px rgba(0, 0, 0, .2), 0 8px 10px 1px rgba(0, 0, 0, .14), 0 3px 14px 2px rgba(0, 0, 0, .12);
+}
+
+.dropdown-multiselect {
+  button {
+    span {
+      color: #999;
+      font-weight: 300;
+      display: inline-block;
+      max-width: 80%;
+    }
+
+    .selected-items {
+      color: #4a5c89;
+      max-width: 477px;
+    }
+  }
+}
+
+.selected-items strong {
+  font-weight: 300;
+}
+
+.dropdown-list,
+.dropdown-multiselect {
+  .caret-btn {
+    position: absolute;
+    top: 0;
+    right: 0;
+    width: 40px;
+    height: 100%;
+    text-align: center;
+    padding: 7px;
+    -webkit-appearance: none;
+    -moz-appearance: none;
+    border-left: 1px solid #ececec;
+    background: #fff;
+    color: #36afd5 !important;
+  }
+
+  .list-menu {
+    width: 100%;
+    max-height: 450px;
+    left: 0;
+    padding: 0;
+    margin: 0;
+    overflow-y: auto;
+    overflow-x: hidden;
+
+    li {
+      padding: 0;
+      margin: 0;
+    }
+    .role-item {
+      padding-left: 30px;
+    }
+
+    .role-cloud-item {
+      padding-left: 60px;
+    }
+  }
+
+  &.statuses {
+    .list-menu {
+      .list-item {
+        text-transform: capitalize;
+      }
+    }
+  }
+
+  &.resources {
+    .list-menu {
+      .list-item {
+        text-transform: capitalize;
+      }
+    }
+  }
+}
+
+.dropdown-list .list-menu a,
+.dropdown-multiselect .list-menu li a {
+  display: block;
+  padding: 10px;
+  padding-left: 15px;
+  position: relative;
+  font-weight: 300;
+  cursor: pointer;
+  color: #4a5c89;
+  text-decoration: none;
+}
+
+.dropdown-multiselect .list-menu li a {
+  padding-left: 45px;
+  transition: all .45s ease-in-out;
+}
+
+.dropdown-list .list-menu a:hover,
+.dropdown-multiselect .list-menu a:hover {
+  background: #f7f7f7;
+  color: #35afd5;
+}
+
+.dropdown-multiselect .list-menu .filter-actions {
+  display: flex;
+  cursor: pointer;
+  border-bottom: 1px solid #ececec;
+}
+
+.dropdown-multiselect .list-menu .filter-actions a {
+  width: 50%;
+  color: #35afd5;
+  display: block;
+  padding: 0;
+  line-height: 40px !important;
+  text-align: center;
+}
+
+.dropdown-list {
+
+  .list-menu,
+  .title {
+    span {
+      display: flex;
+      justify-content: space-between;
+      align-items: center;
+      font-weight: 300;
+    }
+  }
+}
+
+.dropdown-list .list-menu li span.caption {
+  display: block;
+  padding: 10px 15px;
+  cursor: default;
+}
+
+.dropdown-list .list-menu li i,
+.dropdown-list .list-menu li strong {
+  display: inline-block;
+  width: 30px;
+  text-align: center;
+  vertical-align: middle;
+  color: #35afd5;
+  line-height: 26px;
+}
+
+.dropdown-list .list-menu li i {
+  vertical-align: sub;
+  font-size: 18px;
+}
+
+.dropdown-list .list-menu a {
+  padding: 12px;
+  padding-left: 15px;
+  position: relative;
+  font-weight: 300;
+  cursor: pointer;
+
+  em {
+    font-size: 13px;
+    color: #35afd5;
+    margin-right: 0;
+    font-style: normal;
+  }
+}
+
+.dropdown-list .list-menu a.empty {
+  height: 36px;
+}
+
+.dropdown-multiselect .list-menu .filter-actions i {
+  vertical-align: sub;
+  color: #35afd5;
+  font-size: 18px;
+  line-height: 26px;
+  transition: all .45s ease-in-out;
+}
+
+.dropdown-multiselect .list-menu .select_all:hover,
+.dropdown-multiselect .list-menu .select_all:hover i {
+  color: #4eaf3e !important;
+  background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu .deselect_all:hover,
+.dropdown-multiselect .list-menu .deselect_all:hover i {
+  color: #f1696e !important;
+  background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu a {
+  span {
+    position: absolute;
+    top: 10px;
+    left: 25px;
+    color: #35afd5;
+
+    &.checked-checkbox {
+      top: 0;
+      left: 4px;
+      width: 5px;
+      height: 10px;
+      border-bottom: 2px solid white;
+      border-right: 2px solid white;
+      position: absolute;
+      transform: rotate(45deg);
+    }
+
+    &.line-checkbox {
+      top: 0;
+      left: 2px;
+      width: 8px;
+      height: 7px;
+      border-bottom: 2px solid white;
+    }
+
+    &.arrow{
+      width: 16px;
+      height: 14px;
+      border: 8px solid transparent;
+      border-left: 8px solid lightgrey;
+      left: 10px;
+      top: 12px;
+      border-radius: 3px;
+
+      &.rotate-arrow{
+        transform: rotate(90deg);
+        transition: .1s ease-in-out;
+        top: 15px;
+        left: 6px;
+      }
+
+      &.arrow-checked{
+        border-left: 8px solid #35afd5;
+      }
+    }
+  }
+
+}
+
+.dropdown-multiselect.btn-group.open .dropdown-toggle {
+  box-shadow: none;
+}
+
+.empty-checkbox {
+  width: 16px;
+  height: 16px;
+  border-radius: 2px;
+  border: 2px solid lightgrey;
+  margin-top: 2px;
+  position: relative;
+  &.checked {
+    border-color: #35afd5;
+    background-color: #35afd5;
+  }
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
new file mode 100644
index 0000000..cabf7d9
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { Input, Output, Component, EventEmitter } from '@angular/core';
+
+@Component({
+  selector: 'multi-level-select-dropdown',
+  templateUrl: 'multi-level-select-dropdown.component.html',
+  styleUrls: ['multi-level-select-dropdown.component.scss']
+})
+
+export class MultiLevelSelectDropdownComponent {
+
+  @Input() items: Array<any>;
+  @Input() model: Array<any>;
+  @Input() type: string;
+  @Output() selectionChange: EventEmitter<{}> = new EventEmitter();
+
+  public isOpenCategory = {};
+
+  public isCloudOpen = {};
+
+  public labels = {
+    COMPUTATIONAL_SHAPE: 'Compute shapes',
+    NOTEBOOK_SHAPE: 'Notebook shapes',
+    COMPUTATIONAL: 'Compute'
+  };
+
+  toggleSelectedOptions($event, model, value) {
+    $event.preventDefault();
+    const alreadySelected = model.filter(v => v.role === value.role).length > 0;
+    if (alreadySelected) {
+      this.model = model.filter(v => v.role !== value.role);
+    } else {
+      model.push(value);
+    }
+    this.onUpdate($event);
+  }
+
+  toggleSelectedCategory($event, model, value) {
+    $event.preventDefault();
+    const categoryItems = this.items.filter(role => role.type === value);
+    if (this.selectedAllInCategory(value)) {
+      this.model = this.model.filter(role => role.type !== value);
+    } else {
+      categoryItems.forEach(role => {
+        if (!model.filter(mod => mod.role === role.role).length) { this.model.push(role); }
+      });
+    }
+    this.onUpdate($event);
+  }
+
+  toggleSelectedCloud($event, model, category, cloud) {
+    $event.preventDefault();
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    if (this.selectedAllInCloud(category, cloud)) {
+      this.model = this.model.filter(role => !(role.type === category && role.cloud === cloud));
+    } else {
+      categoryItems.forEach(role => {
+        if (!model.filter(mod => mod.role === role.role).length) { this.model.push(role); }
+      });
+    }
+    this.onUpdate($event);
+  }
+
+  selectAllOptions($event) {
+    $event.preventDefault();
+    this.model = [...this.items];
+    this.onUpdate($event);
+  }
+
+  deselectAllOptions($event) {
+    this.model = [];
+    this.onUpdate($event);
+    $event.preventDefault();
+  }
+
+  onUpdate($event): void {
+    this.selectionChange.emit({ model: this.model, type: this.type, $event });
+  }
+
+  public toggleItemsForLabel(label, $event) {
+    this.isOpenCategory[label] = !this.isOpenCategory[label];
+    this.isCloudOpen[label + 'AWS'] = false;
+    this.isCloudOpen[label + 'GCP'] = false;
+    this.isCloudOpen[label + 'AZURE'] = false;
+    $event.preventDefault();
+  }
+
+  public toggleItemsForCloud(label, $event) {
+    this.isCloudOpen[label] = !this.isCloudOpen[label];
+    $event.preventDefault();
+  }
+
+  public selectedAllInCategory(category) {
+    const selected = this.model.filter(role => role.type === category);
+    const categoryItems = this.items.filter(role => role.type === category);
+    return selected.length === categoryItems.length;
+  }
+
+  public selectedSomeInCategory(category) {
+    const selected = this.model.filter(role => role.type === category);
+    const categoryItems = this.items.filter(role => role.type === category);
+    return selected.length && selected.length !== categoryItems.length;
+  }
+
+  public selectedAllInCloud(category, cloud) {
+    const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    return selected.length === categoryItems.length;
+  }
+
+  public selectedSomeInCloud(category, cloud) {
+    const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+    const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+    return selected.length && selected.length !== categoryItems.length;
+  }
+
+  public checkInModel(item) {
+    return this.model.filter(v => v.role === item).length;
+  }
+
+  public selectedRolesList() {
+    return this.model.map(role => role.role).join(',');
+  }
+}
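
The component's selection model, condensed into a runnable sketch: items carry a `type` (category), optionally a `cloud`, and a `role`; the checked subset lives in `model`, and `selectedAllInCategory` / `selectedSomeInCategory` reduce to the counting checks below. The data is invented.

```typescript
interface RoleItem { type: string; cloud?: string; role: string; }

const items: RoleItem[] = [
  { type: 'COMPUTATIONAL_SHAPE', cloud: 'AWS', role: 'r5.large' },
  { type: 'COMPUTATIONAL_SHAPE', cloud: 'GCP', role: 'n1-standard-2' },
  { type: 'BILLING', role: 'View billing report' }
];
let model: RoleItem[] = [items[0]]; // one of the two shapes checked

const inCategory = (cat: string, list: RoleItem[]) => list.filter(r => r.type === cat);
const allSelected = (cat: string) =>
  inCategory(cat, model).length === inCategory(cat, items).length;
const someSelected = (cat: string) => {
  const n = inCategory(cat, model).length;
  return n > 0 && n < inCategory(cat, items).length;
};

console.log(allSelected('COMPUTATIONAL_SHAPE'));  // -> false
console.log(someSelected('COMPUTATIONAL_SHAPE')); // -> true: indeterminate checkbox state
```
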
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
index b1fd8ba..9bb12e7 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
@@ -23,16 +23,22 @@
       <span *ngIf="model.notebook.name && model.notebook.name !== 'edge node'">
         <span>{{ confirmationType ? 'Terminate' : 'Stop' }} notebook: {{ model.notebook.name }}</span>
       </span>
-      <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node' || data.type === 4">
+      <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node' || data.type === 4 && data.notebook.length">
         <i class="material-icons">priority_high</i>Warning
       </span>
+      <span *ngIf="data.type === 4 && !data.notebook.length">
+        Update group data
+      </span>
     </h4>
     <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
   </header>
   <div class="dialog-content">
     <div class="content-box">
       <p *ngIf="data.type !== 4" class="info text-center">{{ model.title }}</p>
-      <p *ngIf="data.type === 4" class="text-center delete-user">User<span *ngIf="data.notebook.length>1">s</span>  <span class="strong"> {{data.notebook.join(', ')}} </span>will be deleted from this group. All <span *ngIf="data.notebook.length===1">his</span><span *ngIf="data.notebook.length>1">their</span> resources authorized within this group will be terminated.</p>
+      <div *ngIf="data.type === 4" class="text-center m-bot-20">
+        <h3 class="strong">Group data will be updated.</h3>
+      </div>
+      <p *ngIf="data.type === 4 && data.notebook.length" class="text-center delete-user">User<span *ngIf="data.notebook.length>1">s</span>  <span class="strong"> {{data.notebook.join(', ')}} </span>will be deleted from this group. All <span *ngIf="data.notebook.length===1">his</span><span *ngIf="data.notebook.length>1">their</span> resources authorized within this group will be terminated.</p>
       <mat-list class="resources"
         [hidden]="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node'
                                   || !model.notebook.resources || model.notebook.resources.length === 0 || (!isAliveResources && !confirmationType) || onlyKilled">
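The template now distinguishes three cases through `data.type === 4` plus `data.notebook.length`: a plain group update, an update that also removes users (warning header plus the delete-user paragraph), and all other confirmation types. A sketch of the assumed dialog payload for the group branch, inferred from the bindings above and not part of this commit:

    // Hypothetical shape of the dialog data for the roles-group branch.
    interface GroupUpdateDialogData {
      type: number;        // 4 => group update confirmation
      notebook: string[];  // users being removed; empty array => plain update
    }

    const showUserWarning = (data: GroupUpdateDialogData) =>
      data.type === 4 && data.notebook.length > 0;
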
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
index 305c504..c71e2ed 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
@@ -18,6 +18,9 @@
  */
 
 .confirmation-dialog {
+  h3 {
+    margin-bottom: 20px;
+  }
   color: #718ba6;
   p {
     font-size: 14px;
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
index 41c31cc..1bfcd06 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
@@ -71,19 +71,19 @@
 
   private stopExploratory(): Observable<{}> {
     return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.name)
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, this.notebook.name)
       : this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'stop');
   }
 
   private terminateExploratory(): Observable<{}> {
     return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.name)
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.project,  this.notebook.name)
       : this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'terminate');
   }
 
   private stopEdgeNode(): Observable<{}> {
     return this.manageAction
-      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', 'edge')
+      ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, 'edge')
       : this.healthStatusService.suspendEdgeNode();
   }
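All three call sites now thread the notebook's project into the management call. A sketch of the service method they presumably target, assuming Angular HttpClient and a URL layout matching the `/environment/{action}/{project}/{resource}` routes exercised by EnvironmentResourceTest later in this diff (the `/api` prefix and method body are assumptions):

    import { HttpClient } from '@angular/common/http';
    import { Observable } from 'rxjs';

    // Sketch only; the real ManageEnvironmentsService may differ.
    class ManageEnvironmentsService {
      constructor(private http: HttpClient) {}

      environmentManagement(user: string, action: string, project: string, resource: string): Observable<{}> {
        // e.g. POST /api/environment/stop/myProject/edge with the user as body
        return this.http.post(`/api/environment/${action}/${project}/${resource}`, user);
      }
    }
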
 
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
index f0c7910..20ec20f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
@@ -30,6 +30,13 @@
               <button type="button" class="close" (click)="dialogRef.close()">&times;</button>
           </header>
           <div mat-dialog-content class="content message">
+            <div *ngIf="data.type === 'terminateNode'" class="table-header">
+              <div *ngIf="data.item.action.endpoint.length > 0">
+                Edge node<span *ngIf="data.item.action.endpoint.length>1">s</span>
+                        <span class="strong">{{ ' ' + data.item.action.endpoint.join(', ') }}</span> in project
+                <span class="strong">{{ data.item.action.project_name }}</span> will be terminated.
+              </div>
+            </div>
               <div *ngIf="data.type === 'list'" class="info">
                   <div *ngIf="data.template.notebook.length > 0">
                       Following notebook server<span *ngIf="data.template.notebook.length>1">s </span>
@@ -80,12 +87,12 @@
                               </div>
                           </div>
                       </div>
-                      <div class="confirm-resource-terminating">
-                          <label>
-                              <input class="checkbox" type="checkbox"
-                                     (change)="terminateResource()"/>Do not terminate all related resources
-                          </label>
-                      </div>
+<!--                      <div class="confirm-resource-terminating">-->
+<!--                          <label>-->
+<!--                              <input class="checkbox" type="checkbox"-->
+<!--                                     (change)="terminateResource()"/>Do not terminate all related resources-->
+<!--                          </label>-->
+<!--                      </div>-->
                       <p class="confirm-message">
                           <span *ngIf="!willNotTerminate">All connected computational resources will be terminated as well.</span>
                       </p>
@@ -102,8 +109,7 @@
                           </mat-list-item>
                       </div>
                   </mat-list>
-                  <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
-
+                <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
                   <div class="text-center m-top-30 m-bott-10">
                       <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
                       <button *ngIf="!this.willNotTerminate" type="button" class="butt butt-success" mat-raised-button
@@ -114,6 +120,30 @@
                       </button>
                   </div>
               </div>
+               <div class="confirm-dialog" *ngIf="data.type === 'terminateNode'">
+                   <mat-list *ngIf="data.item.resources.length > 0">
+                     <mat-list-item class="list-header sans">
+                       <div class="endpoint">Resources</div>
+                       <div class="status">Further status</div>
+                     </mat-list-item>
+                     <div class="scrolling-content">
+                       <mat-list-item *ngFor="let resource of data.item.resources" class="sans node">
+                         <div class="endpoint ellipsis">{{resource}}</div>
+                         <div class="status terminated">Terminated</div>
+                       </mat-list-item>
+                     </div>
+                   </mat-list>
+                   <div mat-dialog-content class="bottom-message" *ngIf="data.item.resources.length > 0">
+                     <span class="confirm-message">All connected computational resources will be terminated as well.</span>
+                   </div>
+                 <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
+                 <div class="text-center m-top-30 m-bott-10">
+                   <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
+                   <button type="button" class="butt butt-success" mat-raised-button
+                           (click)="dialogRef.close(true)">Yes
+                   </button>
+                 </div>
+               </div>
           </div>
       </div>
   `,
@@ -137,9 +167,11 @@
     .resource-list-header{display: flex; font-weight: 600; font-size: 16px;height: 48px; border-top: 1px solid #edf1f5; border-bottom: 1px solid #edf1f5; padding: 0 20px;}
     .resource-list-row{display: flex; border-bottom: 1px solid #edf1f5;padding: 0 20px;}
     .confirm-resource-terminating{text-align: left; padding: 10px 20px;}
-    .confirm-message{color: #ef5c4b;font-size: 13px;min-height: 18px; text-align: center;}
+    .confirm-message{color: #ef5c4b; font-size: 13px; min-height: 18px; text-align: center; padding-top: 20px;}
     .checkbox{margin-right: 5px;vertical-align: middle; margin-bottom: 3px;}
     label{cursor: pointer}
+    .bottom-message{padding-top: 15px;}
+    .table-header{padding-bottom: 10px;}
 
 
   `]
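The new `terminateNode` branch assumes a dialog payload shaped roughly as below; the interface is reconstructed from the template bindings (`data.item.action.endpoint`, `project_name`, `resources`) and is illustrative only:

    interface TerminateNodeDialogData {
      type: 'terminateNode';
      item: {
        action: { endpoint: string[]; project_name: string };
        resources: string[]; // each row renders with a fixed "Terminated" status
      };
    }
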
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
index 9485a87..f2d715f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/navbar/navbar.component.html
@@ -75,7 +75,7 @@
             <span *ngIf="isExpanded; else resources">List of Resources</span>
             <ng-template #resources><i class="material-icons">dashboard</i></ng-template>
           </a>
-          <a class="nav-item has-children" *ngIf="healthStatus?.admin">
+          <a class="nav-item has-children" *ngIf="healthStatus?.admin || healthStatus?.projectAdmin">
             <span *ngIf="isExpanded">Administration</span>
 
             <a class="sub-nav-item" [style.margin-left.px]="isExpanded ? '30' : '0'" [routerLink]="['/roles']"
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
index fb144a6..c6f8fe8 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
@@ -62,6 +62,7 @@
 
     #dialog-box {
       color: $modal-text-color;
+      min-height: 150px;
 
       .dialog-header {
         padding-left: 30px;
@@ -268,7 +269,7 @@
     }
   }
 
-  span {
+  span:not(.description) {
     font-size: 14px;
     overflow: hidden;
     text-overflow: ellipsis;
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
index 380aa75..7b48bba 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
@@ -615,10 +615,8 @@
 .mat-table {
   .header-row {
     th.mat-header-cell {
-      font-size: 15px;
       font-family: 'Open Sans', sans-serif;
       font-weight: 600;
-      color: #607D8B;
     }
 
     .mat-cell {
@@ -658,3 +656,9 @@
     background-color: #baf0f7;
   }
 }
+.manage-roles {
+  .mat-horizontal-content-container {
+    overflow: visible !important;
+  }
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
index 0478533..b7d9abc 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
@@ -36,15 +36,15 @@
         'resourceName': 'resourceName',
         'cost': 'cost',
         'costTotal': 'cost_total',
-        'currencyCode': 'currency_code',
+        'currencyCode': 'currencyCode',
         'dateFrom': 'from',
         'dateTo': 'to',
         'service': 'meterCategory',
-        'service_filter_key': 'category',
+        'service_filter_key': 'meterCategory',
         'type': '',
         'resourceType': 'resource_type',
-        'instance_size': 'shape',
-        'dlabId': 'dlab_id'
+        'instance_size': 'size',
+        'dlabId': 'dlabId'
     },
     'service': 'Category',
     'type': '',
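The Azure billing dictionary now maps straight to the raw field names of the Azure billing records (`currencyCode`, `meterCategory`, `size`, `dlabId`) instead of the earlier snake_case report keys, and filtering switches from the generic `category` to `meterCategory`. Assuming the billing grid resolves its column and filter keys through this per-cloud dictionary, the effect is roughly:

    // Illustrative lookup; `dictionary` stands in for the per-cloud
    // dictionary object defined in this file.
    const field = (dictionary: any, key: string): string => dictionary.billing[key];
    // field(azureDictionary, 'service_filter_key') === 'meterCategory'
    // field(azureDictionary, 'dlabId')             === 'dlabId'
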
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
index a8b01fa..5ac537b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
@@ -20,7 +20,6 @@
 package com.epam.dlab.backendapi.resources;
 
 import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.resources.dto.UserDTO;
 import com.epam.dlab.backendapi.service.EnvironmentService;
 import com.epam.dlab.exceptions.ResourceConflictException;
 import io.dropwizard.auth.AuthenticationException;
@@ -31,17 +30,23 @@
 import org.junit.Test;
 
 import javax.ws.rs.client.Entity;
-import javax.ws.rs.core.GenericType;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.util.Collections;
-import java.util.List;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class EnvironmentResourceTest extends TestBase {
 
@@ -56,45 +61,9 @@
 	}
 
 	@Test
-	public void getUsersWithActiveEnv() {
-		when(environmentService.getUsers()).thenReturn(Collections.singletonList(new UserDTO("activeUser",
-				null, UserDTO.Status.ACTIVE)));
-		final Response response = resources.getJerseyTest()
-				.target("/environment/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(Collections.singletonList(new UserDTO("activeUser", null, UserDTO.Status.ACTIVE)),
-				response.readEntity(new GenericType<List<UserDTO>>() {
-				}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).getUsers();
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void getUsersWithActiveEnvWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(environmentService.getUsers()).thenReturn(Collections.singletonList(new UserDTO("activeUser",
-				null, UserDTO.Status.ACTIVE)));
-		final Response response = resources.getJerseyTest()
-				.target("/environment/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
 	public void getAllEnv() {
-		when(environmentService.getAllEnv()).thenReturn(Collections.emptyList());
+		UserInfo userInfo = getUserInfo();
+		when(environmentService.getAllEnv(userInfo)).thenReturn(Collections.emptyList());
 		final Response response = resources.getJerseyTest()
 				.target("/environment/all")
 				.request()
@@ -104,14 +73,14 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).getAllEnv();
+		verify(environmentService).getAllEnv(eq(userInfo));
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void getAllEnvWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(environmentService.getAllEnv()).thenReturn(Collections.emptyList());
+		when(environmentService.getAllEnv(getUserInfo())).thenReturn(Collections.emptyList());
 		final Response response = resources.getJerseyTest()
 				.target("/environment/all")
 				.request()
@@ -125,59 +94,10 @@
 	}
 
 	@Test
-	public void stopEnv() {
-		doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEnvWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_FORBIDDEN, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verifyZeroInteractions(environmentService);
-	}
-
-	@Test
-	public void stopEnvWithResourceConflictException() {
-		doThrow(new ResourceConflictException("Can not stop environment because one of the user resources is in " +
-				"status CREATING or STARTING")).when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/environment/stop")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.text(USER));
-
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-
-		verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER);
-		verifyNoMoreInteractions(environmentService);
-	}
-
-	@Test
 	public void stopNotebook() {
-		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -185,16 +105,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopNotebookWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -208,9 +128,9 @@
 	@Test
 	public void stopNotebookWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not stop notebook because its status is CREATING or STARTING"))
-				.when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+				.when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName")
+				.target("/environment/stop/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -218,15 +138,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+		verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopCluster() {
-		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -234,16 +154,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void stopClusterWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+		doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -257,9 +177,9 @@
 	@Test
 	public void stopClusterWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not stop cluster because its status is CREATING or STARTING"))
-				.when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+				.when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/stop/explName/compName")
+				.target("/environment/stop/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -267,15 +187,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+		verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateNotebook() {
-		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -283,16 +203,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateNotebookWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -306,9 +226,9 @@
 	@Test
 	public void terminateNotebookWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not terminate notebook because its status is CREATING or STARTING"))
-				.when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+				.when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName")
+				.target("/environment/terminate/projectName/explName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -316,15 +236,15 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+		verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateCluster() {
-		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -332,16 +252,16 @@
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 
 	@Test
 	public void terminateClusterWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+		doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -355,9 +275,9 @@
 	@Test
 	public void terminateClusterWithResourceConflictException() {
 		doThrow(new ResourceConflictException("Can not terminate cluster because its status is CREATING or STARTING"))
-				.when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+				.when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/environment/terminate/explName/compName")
+				.target("/environment/terminate/projectName/explName/compName")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.post(Entity.text(USER));
@@ -365,7 +285,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+		verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
 		verifyNoMoreInteractions(environmentService);
 	}
 }
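The per-user endpoints (`/environment/user`, and `/environment/stop` for a whole environment) are dropped, and every remaining stop/terminate route gains a project segment: `/environment/{stop|terminate}/{project}/{exploratory}[/{computational}]`, with `getAllEnv` now taking the caller's UserInfo. A sketch of client calls matching the new routes (base path and HttpClient usage are assumptions):

    import { HttpClient } from '@angular/common/http';

    class EnvironmentApi {
      constructor(private http: HttpClient, private userName: string) {}

      stopNotebook(project: string, notebook: string) {
        return this.http.post(`/api/environment/stop/${project}/${notebook}`, this.userName);
      }

      terminateCluster(project: string, notebook: string, cluster: string) {
        return this.http.post(`/api/environment/terminate/${project}/${notebook}/${cluster}`, this.userName);
      }
    }
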
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
index f6f4692..bccfa8b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
@@ -47,7 +47,14 @@
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class ExploratoryResourceTest extends TestBase {
 
@@ -154,9 +161,9 @@
 
 	@Test
 	public void stop() {
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -165,16 +172,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void stopWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -183,16 +190,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void stopWithException() {
 		doThrow(new DlabException("Could not stop exploratory environment"))
-				.when(exploratoryService).stop(any(UserInfo.class), anyString());
+				.when(exploratoryService).stop(any(UserInfo.class), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/stop")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/stop")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -204,15 +211,15 @@
 		assertTrue(actualJson.contains(expectedJson));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).stop(getUserInfo(), "someName");
+		verify(exploratoryService).stop(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminate() {
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -221,16 +228,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminateWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -239,16 +246,16 @@
 		assertEquals("someUuid", response.readEntity(String.class));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void terminateWithException() {
 		doThrow(new DlabException("Could not terminate exploratory environment"))
-				.when(exploratoryService).terminate(any(UserInfo.class), anyString());
+				.when(exploratoryService).terminate(any(UserInfo.class), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/terminate")
+				.target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.delete();
@@ -260,22 +267,22 @@
 		assertTrue(actualJson.contains(expectedJson));
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryService).terminate(getUserInfo(), "someName");
+		verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
 	@Test
 	public void updateSparkConfig() {
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/reconfigure")
+				.target("/infrastructure_provision/exploratory_environment/someProject/someName/reconfigure")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.put(Entity.json(Collections.singletonList(new ClusterConfig())));
 
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 
-		verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someName"),
-				eq(Collections.singletonList(new ClusterConfig())));
+		verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someProject"),
+				eq("someName"), eq(Collections.singletonList(new ClusterConfig())));
 		verifyNoMoreInteractions(exploratoryService);
 	}
 
@@ -283,9 +290,9 @@
 	public void getSparkConfig() {
 		final ClusterConfig config = new ClusterConfig();
 		config.setClassification("test");
-		when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString())).thenReturn(Collections.singletonList(config));
+		when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString(), anyString())).thenReturn(Collections.singletonList(config));
 		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/someName/cluster/config")
+				.target("/infrastructure_provision/exploratory_environment/someProject/someName/cluster/config")
 				.request()
 				.header("Authorization", "Bearer " + TOKEN)
 				.get();
@@ -296,7 +303,7 @@
 		assertEquals(1, clusterConfigs.size());
 		assertEquals("test", clusterConfigs.get(0).getClassification());
 
-		verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someName"));
+		verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someProject"), eq("someName"));
 		verifyNoMoreInteractions(exploratoryService);
 	}
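The exploratory endpoints follow the same pattern: stop, terminate, reconfigure, and cluster-config URLs now embed the project between the base path and the notebook name. Assumed client-side counterparts (class and method names are illustrative):

    import { HttpClient } from '@angular/common/http';

    class ExploratoryApi {
      private base = '/api/infrastructure_provision/exploratory_environment';
      constructor(private http: HttpClient) {}

      stop(project: string, name: string) {
        return this.http.delete(`${this.base}/${project}/${name}/stop`);
      }

      getClusterConfig(project: string, name: string) {
        return this.http.get(`${this.base}/${project}/${name}/cluster/config`);
      }
    }
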
 
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
index 2a7e4c0..38c0e46 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
@@ -45,10 +45,16 @@
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class ImageExploratoryResourceTest extends TestBase {
-
+	private static final String PROJECT = "projectName";
 	private ImageExploratoryService imageExploratoryService = mock(ImageExploratoryService.class);
 	private RequestId requestId = mock(RequestId.class);
 
@@ -63,7 +69,7 @@
 
 	@Test
 	public void createImage() {
-		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
 				.thenReturn("someUuid");
 		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
@@ -75,7 +81,7 @@
 		assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName",
 				"someImageName", "someDescription");
 		verify(requestId).put(USER.toLowerCase(), "someUuid");
 		verifyNoMoreInteractions(imageExploratoryService, requestId);
@@ -84,7 +90,7 @@
 	@Test
 	public void createImageWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
-		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+		when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
 				.thenReturn("someUuid");
 		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 		final Response response = resources.getJerseyTest()
@@ -96,8 +102,7 @@
 		assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
-				"someImageName", "someDescription");
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
 		verify(requestId).put(USER.toLowerCase(), "someUuid");
 		verifyNoMoreInteractions(imageExploratoryService, requestId);
 	}
@@ -105,7 +110,7 @@
 	@Test
 	public void createImageWithException() {
 		doThrow(new ResourceAlreadyExistException("Image with name is already exist"))
-				.when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString());
+				.when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure_provision/exploratory_environment/image")
 				.request()
@@ -115,8 +120,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
-				"someImageName", "someDescription");
+		verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
 		verifyNoMoreInteractions(imageExploratoryService);
 		verifyZeroInteractions(requestId);
 	}
@@ -263,6 +267,7 @@
 	private ExploratoryImageCreateFormDTO getExploratoryImageCreateFormDTO() {
 		ExploratoryImageCreateFormDTO eicfDto = new ExploratoryImageCreateFormDTO("someImageName", "someDescription");
 		eicfDto.setNotebookName("someNotebookName");
+		eicfDto.setProjectName(PROJECT);
 		return eicfDto;
 	}
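`createImage` gains a project argument, supplied through the form DTO's new `setProjectName`. The JSON body the endpoint is assumed to accept now looks roughly like this (the snake_case property names are an assumption based on the DTO style elsewhere, not confirmed by this diff):

    // Hypothetical request body for POST .../exploratory_environment/image
    const body = {
      name: 'someImageName',
      description: 'someDescription',
      notebook_name: 'someNotebookName',
      project_name: 'projectName'
    };
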
 
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
index a8a872a..0f63cb9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/InfrastructureInfoResourceTest.java
@@ -39,6 +39,7 @@
 import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyString;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.mock;
@@ -93,7 +94,7 @@
 	@Test
 	public void healthStatus() {
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.queryParam("full", "1")
@@ -105,7 +106,7 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
@@ -113,7 +114,7 @@
 	public void healthStatusWithFailedAuth() throws AuthenticationException {
 		authFailSetup();
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.queryParam("full", "1")
@@ -125,14 +126,14 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(true));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
 	@Test
 	public void healthStatusWithDefaultQueryParam() {
 		HealthStatusPageDTO hspDto = getHealthStatusPageDTO();
-		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean())).thenReturn(hspDto);
+		when(infrastructureInfoService.getHeathStatus(any(UserInfo.class), anyBoolean())).thenReturn(hspDto);
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.request()
@@ -143,14 +144,14 @@
 		assertEquals(hspDto.getStatus(), response.readEntity(HealthStatusPageDTO.class).getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
 	@Test
 	public void healthStatusWithException() {
 		doThrow(new DlabException("Could not return status of resources for user"))
-				.when(infrastructureInfoService).getHeathStatus(any(UserInfo.class), anyBoolean(), anyBoolean());
+				.when(infrastructureInfoService).getHeathStatus(any(UserInfo.class), anyBoolean());
 		final Response response = resources.getJerseyTest()
 				.target("/infrastructure/status")
 				.request()
@@ -160,7 +161,7 @@
 		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false), anyBoolean());
+		verify(infrastructureInfoService).getHeathStatus(refEq(getUserInfo()), eq(false));
 		verifyNoMoreInteractions(infrastructureInfoService);
 	}
 
@@ -200,8 +201,8 @@
 	}
 
 	private HealthStatusPageDTO getHealthStatusPageDTO() {
-		HealthStatusPageDTO hspdto = new HealthStatusPageDTO();
-		hspdto.setStatus("someStatus");
-		return hspdto;
+		return HealthStatusPageDTO.builder()
+				.status("someStatus")
+				.build();
 	}
 }
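`getHeathStatus` loses its trailing boolean, leaving only the UserInfo and the `full` flag, and the test helper now builds `HealthStatusPageDTO` through a builder rather than setters. On the client side the call presumably reduces to a single query parameter (function name and base path assumed):

    import { HttpClient } from '@angular/common/http';

    // Sketch only: fetch the health status page, optionally with full detail.
    function getEnvironmentHealthStatus(http: HttpClient, full: boolean) {
      return http.get('/api/infrastructure/status', { params: { full: full ? '1' : '0' } });
    }
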
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
index 50f6763..c7f5ced 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
@@ -22,7 +22,12 @@
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.dao.ExploratoryDAO;
 import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.dto.*;
+import com.epam.dlab.backendapi.resources.dto.LibInfoRecord;
+import com.epam.dlab.backendapi.resources.dto.LibInstallFormDTO;
+import com.epam.dlab.backendapi.resources.dto.LibKey;
+import com.epam.dlab.backendapi.resources.dto.LibraryDTO;
+import com.epam.dlab.backendapi.resources.dto.LibraryStatus;
+import com.epam.dlab.backendapi.resources.dto.SearchLibsFormDTO;
 import com.epam.dlab.backendapi.service.ExternalLibraryService;
 import com.epam.dlab.backendapi.service.LibraryService;
 import com.epam.dlab.dto.UserInstanceDTO;
@@ -50,331 +55,355 @@
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class LibExploratoryResourceTest extends TestBase {
 
-	private static final String LIB_GROUP = "group";
-	private static final String LIB_NAME = "name";
-	private static final String LIB_VERSION = "version";
-	private static final String EXPLORATORY_NAME = "explName";
-	private static final String COMPUTATIONAL_NAME = "compName";
-	private static final String UUID = "uid";
-	private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
-	private LibraryService libraryService = mock(LibraryService.class);
-	private RESTService provisioningService = mock(RESTService.class);
-	private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
-	private RequestId requestId = mock(RequestId.class);
+    private static final String LIB_GROUP = "group";
+    private static final String LIB_NAME = "name";
+    private static final String LIB_VERSION = "version";
+    private static final String EXPLORATORY_NAME = "explName";
+    private static final String PROJECT = "projectName";
+    private static final String COMPUTATIONAL_NAME = "compName";
+    private static final String UUID = "uid";
+    private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
+    private LibraryService libraryService = mock(LibraryService.class);
+    private RESTService provisioningService = mock(RESTService.class);
+    private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
+    private RequestId requestId = mock(RequestId.class);
 
-	@Rule
-	public final ResourceTestRule resources = getResourceTestRuleInstance(
-			new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
+    @Rule
+    public final ResourceTestRule resources = getResourceTestRuleInstance(
+            new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
 
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-	@Test
-	public void getLibGroupListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
-				(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+    @Test
+    public void getLibGroupListWithFailedAuth() throws AuthenticationException {
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
-				(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithoutComputationalWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibGroupListWithoutComputationalWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_groups")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_groups")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibList() {
-		when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
-		}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+        }));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
-		}));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+        }));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListWithException() {
-		doThrow(new DlabException("Cannot load installed libraries"))
-				.when(libraryService).getLibs(anyString(), anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list")
-				.queryParam("exploratory_name", "explName")
-				.queryParam("computational_name", "compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new DlabException("Cannot load installed libraries"))
+                .when(libraryService).getLibs(anyString(), anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list")
+                .queryParam("project_name", "projectName")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("computational_name", "compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormatted() {
-		when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormattedWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void getLibListFormattedWithException() {
-		doThrow(new DlabException("Cannot load  formatted list of installed libraries"))
-				.when(libraryService).getLibInfo(anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
-				.queryParam("exploratory_name", "explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new DlabException("Cannot load formatted list of installed libraries"))
+                .when(libraryService).getLibInfo(anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+                .queryParam("exploratory_name", "explName")
+                .queryParam("project_name", "projectName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(libraryService);
-	}
+        verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(libraryService);
+    }
 
 	@Test
 	public void libInstall() {
-		when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
-				anyListOf(LibInstallDTO.class))).thenReturn(UUID);
-		LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
-		libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
-		libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
-		libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_install")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(libInstallFormDTO));
+        when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
+                anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
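+        // installComputationalLibs gained a project argument, matched here by the extra anyString()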
+        LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+        libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
+        libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+        libInstallFormDTO.setProject(PROJECT);
+        libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_install")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(libInstallFormDTO));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-		assertEquals(UUID, response.readEntity(String.class));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(UUID, response.readEntity(String.class));
 
-		verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
-		verifyNoMoreInteractions(libraryService);
-		verifyZeroInteractions(provisioningService, requestId);
-	}
+        verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(PROJECT),
+                eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+        verifyNoMoreInteractions(libraryService);
+        verifyZeroInteractions(provisioningService, requestId);
+    }
 
 
 	@Test
 	public void libInstallWithoutComputational() {
-		when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
-		LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
-		libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
-		libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/lib_install")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(libInstallFormDTO));
+        when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
+        LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+        libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+        libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+        libInstallFormDTO.setProject(PROJECT);
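+        // the install form now carries the target project for notebook-only installs as well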
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/lib_install")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(libInstallFormDTO));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-		assertEquals(UUID, response.readEntity(String.class));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(UUID, response.readEntity(String.class));
 
-		verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
-		verifyNoMoreInteractions(libraryService);
-		verifyZeroInteractions(provisioningService, requestId);
-	}
+        verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(PROJECT),
+                eq(EXPLORATORY_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+        verifyNoMoreInteractions(libraryService);
+        verifyZeroInteractions(provisioningService, requestId);
+    }
 
 	@Test
 	public void getLibraryListWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("compName");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        authFailSetup();
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("compName");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
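+        // library search requests are likewise scoped to a project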
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibraryListWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("compName");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("compName");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getLibraryListWithoutComputationalWithException() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString()))
-				.thenReturn(getUserInstanceDto());
-		SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
-		searchLibsFormDTO.setComputationalName("");
-		searchLibsFormDTO.setNotebookName("explName");
-		searchLibsFormDTO.setGroup("someGroup");
-		searchLibsFormDTO.setStartWith("someText");
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/search/lib_list")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(searchLibsFormDTO));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
+                .thenReturn(getUserInstanceDto());
+        SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+        searchLibsFormDTO.setComputationalName("");
+        searchLibsFormDTO.setNotebookName("explName");
+        searchLibsFormDTO.setGroup("someGroup");
+        searchLibsFormDTO.setStartWith("someText");
+        searchLibsFormDTO.setProjectName("projectName");
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(searchLibsFormDTO));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getMavenArtifact() {
@@ -422,11 +451,14 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName("compName");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName("explName")
-				.withResources(singletonList(ucResource));
-	}
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName("compName");
+        return new UserInstanceDTO()
+                .withUser(USER)
+                .withExploratoryName("explName")
+                .withProject(PROJECT)
+                .withResources(singletonList(ucResource));
+    }
 
 	private List<Document> getDocuments() {
 		return singletonList(new Document());
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
index c1df4c7..7c78fed 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
@@ -2,6 +2,7 @@
 
 import com.epam.dlab.auth.UserInfo;
 import com.epam.dlab.backendapi.resources.dto.KeysDTO;
+import com.epam.dlab.backendapi.resources.dto.ProjectActionFormDTO;
 import com.epam.dlab.backendapi.service.AccessKeyService;
 import com.epam.dlab.backendapi.service.ProjectService;
 import com.epam.dlab.exceptions.DlabException;
@@ -16,11 +17,18 @@
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import java.util.Collections;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class ProjectResourceTest extends TestBase {
     private ProjectService projectService = mock(ProjectService.class);
@@ -30,36 +38,34 @@
     public final ResourceTestRule resources = getResourceTestRuleInstance(
             new ProjectResource(projectService, keyService));
 
-
     @Before
     public void setup() throws AuthenticationException {
         authSetup();
     }
 
     @Test
-    public void getProjectsForManaging() {
+    public void stopProject() {
         final Response response = resources.getJerseyTest()
-                .target("project/managing")
+                .target("project/stop")
                 .request()
                 .header("Authorization", "Bearer " + TOKEN)
-                .get();
+                .post(Entity.json(getProjectActionDTO()));
 
-        assertEquals(HttpStatus.SC_OK, response.getStatus());
-        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
-        verify(projectService, times(1)).getProjectsForManaging();
+        assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
+        verify(projectService).stopWithResources(any(UserInfo.class), anyList(), anyString());
         verifyNoMoreInteractions(projectService);
     }
 
     @Test
-    public void stopProjectWithResources() {
+    public void startProject() {
         final Response response = resources.getJerseyTest()
-                .target("project/managing/stop/" + "projectName")
+                .target("project/start")
                 .request()
                 .header("Authorization", "Bearer " + TOKEN)
-                .post(Entity.json(""));
+                .post(Entity.json(getProjectActionDTO()));
 
         assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
-        verify(projectService).stopWithResources(any(UserInfo.class), anyString());
+        verify(projectService).start(any(UserInfo.class), anyList(), anyString());
         verifyNoMoreInteractions(projectService);
     }
 
@@ -98,4 +104,8 @@
         verify(keyService).generateKeys(getUserInfo());
         verifyNoMoreInteractions(keyService);
     }
-}
\ No newline at end of file
+
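+    // helper producing the payload for the new project start/stop endpoints;
+    // "DLAB" is presumably the project name and the list its endpoint URL(s)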
+    private ProjectActionFormDTO getProjectActionDTO() {
+        return new ProjectActionFormDTO("DLAB", Collections.singletonList("https://localhost:8083/"));
+    }
+}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
index 08e601e..c763238 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
@@ -36,7 +36,12 @@
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
 import java.time.temporal.ChronoUnit;
 import java.util.Arrays;
 import java.util.Collections;
@@ -46,7 +51,13 @@
 import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class SchedulerJobResourceTest extends TestBase {
 
@@ -63,228 +74,228 @@
 
 	@Test
 	public void updateExploratoryScheduler() {
-		doNothing().when(schedulerJobService)
-				.updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doNothing().when(schedulerJobService)
+                .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
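+        // scheduler endpoints now embed the project in the path: .../scheduler/{project}/{exploratory}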
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
-				"explName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void updateExploratorySchedulerWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(schedulerJobService)
-				.updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", String.join(" ", "Bearer", TOKEN))
-				.post(Entity.json(getSchedulerJobDTO()));
+        authFailSetup();
+        doNothing().when(schedulerJobService)
+                .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", String.join(" ", "Bearer", TOKEN))
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
-				"explName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void updateExploratorySchedulerWithException() {
-		doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
-				.when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(),
-				any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+                .when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(), anyString(),
+                any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "explName",
-				getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+                "explName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalScheduler() {
-		doNothing().when(schedulerJobService)
-				.updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doNothing().when(schedulerJobService)
+                .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
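+        // same project-first ordering for computational schedulers: (user, project, exploratory, computational)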
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalSchedulerWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		doNothing().when(schedulerJobService)
-				.updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        authFailSetup();
+        doNothing().when(schedulerJobService)
+                .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void upsertComputationalSchedulerWithException() {
-		doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
-				.when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class));
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.post(Entity.json(getSchedulerJobDTO()));
+        doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+                .when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
+                anyString(), any(SchedulerJobDTO.class));
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .post(Entity.json(getSchedulerJobDTO()));
 
-		assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
-				"compName", getSchedulerJobDTO());
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+                "explName", "compName", getSchedulerJobDTO());
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratory() {
-		when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWithException() {
-		doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
-				.when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
+                .when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResource() {
-		when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResourceWithFailedAuth() throws AuthenticationException {
-		authFailSetup();
-		when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
-				.thenReturn(getSchedulerJobDTO());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        authFailSetup();
+        when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(getSchedulerJobDTO());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void fetchSchedulerJobForComputationalResourceWithException() {
-		doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
-				"computational resource")).when(schedulerJobService)
-				.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString());
-		final Response response = resources.getJerseyTest()
-				.target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
+                "computational resource")).when(schedulerJobService)
+                .fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString());
+        final Response response = resources.getJerseyTest()
+                .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
-				"explName", "compName");
-		verifyNoMoreInteractions(schedulerJobService);
-	}
+        verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+                "explName", "compName");
+        verifyNoMoreInteractions(schedulerJobService);
+    }
 
 	@Test
 	public void testGetActiveSchedulers() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
index 1953694..713eda9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserGroupResourceTest.java
@@ -19,14 +19,13 @@
 
 package com.epam.dlab.backendapi.resources;
 
+import com.epam.dlab.auth.UserInfo;
+import com.epam.dlab.backendapi.dao.ProjectDAO;
 import com.epam.dlab.backendapi.resources.dto.GroupDTO;
-import com.epam.dlab.backendapi.resources.dto.UpdateRoleGroupDto;
-import com.epam.dlab.backendapi.resources.dto.UpdateUserGroupDto;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.backendapi.service.UserGroupService;
 import io.dropwizard.auth.AuthenticationException;
 import io.dropwizard.testing.junit.ResourceTestRule;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.http.HttpStatus;
 import org.junit.Before;
 import org.junit.Rule;
@@ -41,26 +40,31 @@
 import java.util.List;
 import java.util.Set;
 
-import static java.util.Collections.singleton;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 public class UserGroupResourceTest extends TestBase {
 
-	private static final String USER = "user";
-	private static final String ROLE_ID = "id";
-	private static final String GROUP = "group";
-	private UserGroupService userGroupService = mock(UserGroupService.class);
+    private static final String USER = "user";
+    private static final String ROLE_ID = "id";
+    private static final String GROUP = "group";
+    private UserGroupService userGroupService = mock(UserGroupService.class);
+    private ProjectDAO projectDAO = mock(ProjectDAO.class);
 
-	@Before
-	public void setup() throws AuthenticationException {
-		authSetup();
-	}
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-	@Rule
-	public final ResourceTestRule resources =
-			getResourceTestRuleInstance(new UserGroupResource(userGroupService));
+    @Rule
+    public final ResourceTestRule resources =
+            getResourceTestRuleInstance(new UserGroupResource(userGroupService));
 
 	@Test
 	public void createGroup() {
@@ -116,77 +120,31 @@
 
 		assertEquals(HttpStatus.SC_OK, response.getStatus());
 
-		verify(userGroupService).updateGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+		verify(userGroupService).updateGroup(getUserInfo(), GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
 		verifyNoMoreInteractions(userGroupService);
 	}
 
 	@Test
 	public void getGroups() {
-		when(userGroupService.getAggregatedRolesByGroup()).thenReturn(Collections.singletonList(getUserGroup()));
+        when(userGroupService.getAggregatedRolesByGroup(any(UserInfo.class))).thenReturn(Collections.singletonList(getUserGroup()));
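+        // getAggregatedRolesByGroup now takes the caller's UserInfo, presumably so results can be scoped per user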
 
-		final Response response = resources.getJerseyTest()
-				.target("/group")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.get();
+        final Response response = resources.getJerseyTest()
+                .target("/group")
+                .request()
+                .header("Authorization", "Bearer " + TOKEN)
+                .get();
 
-		final List<UserGroupDto> actualRoles = response.readEntity(new GenericType<List<UserGroupDto>>() {
-		});
+        final List<UserGroupDto> actualRoles = response.readEntity(new GenericType<List<UserGroupDto>>() {
+        });
 
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-		assertEquals(GROUP, actualRoles.get(0).getGroup());
-		assertTrue(actualRoles.get(0).getRoles().isEmpty());
-		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+        assertEquals(HttpStatus.SC_OK, response.getStatus());
+        assertEquals(GROUP, actualRoles.get(0).getGroup());
+        assertTrue(actualRoles.get(0).getRoles().isEmpty());
+        assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(userGroupService).getAggregatedRolesByGroup();
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addRolesToGroup() {
-
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateRoleGroupDto(singleton(ROLE_ID), GROUP)));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-		verify(userGroupService).updateRolesForGroup(GROUP, singleton(ROLE_ID));
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addRolesToGroupWithValidationException() {
-
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateRoleGroupDto(singleton(ROLE_ID), StringUtils.EMPTY)));
-
-		assertEquals(HttpStatus.SC_UNPROCESSABLE_ENTITY, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteGroupFromRole() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.queryParam("group", GROUP)
-				.queryParam("roleId", ROLE_ID)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-
-		verify(userGroupService).removeGroupFromRole(singleton(GROUP), singleton(ROLE_ID));
-		verifyNoMoreInteractions(userGroupService);
-	}
+        verify(userGroupService).getAggregatedRolesByGroup(getUserInfo());
+        verifyNoMoreInteractions(userGroupService);
+    }
 
 	@Test
 	public void deleteGroup() {
@@ -203,89 +161,15 @@
 		verifyNoMoreInteractions(userGroupService);
 	}
 
-	@Test
-	public void deleteGroupFromRoleWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/role")
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_BAD_REQUEST, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void addUserToGroup() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateUserGroupDto(GROUP, singleton(USER))));
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-		verify(userGroupService).addUsersToGroup(GROUP, singleton(USER));
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void addUserToGroupWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.put(Entity.json(new UpdateUserGroupDto(StringUtils.EMPTY, singleton(USER))));
-
-		assertEquals(HttpStatus.SC_UNPROCESSABLE_ENTITY, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteUserFromGroup() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.queryParam("user", USER)
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_OK, response.getStatus());
-
-
-		verify(userGroupService).removeUserFromGroup(GROUP, USER);
-		verifyNoMoreInteractions(userGroupService);
-	}
-
-	@Test
-	public void deleteUserFromGroupWithValidationException() {
-		final Response response = resources.getJerseyTest()
-				.target("/group/user")
-				.queryParam("group", GROUP)
-				.request()
-				.header("Authorization", "Bearer " + TOKEN)
-				.delete();
-
-		assertEquals(HttpStatus.SC_BAD_REQUEST, response.getStatus());
-
-		verifyZeroInteractions(userGroupService);
-	}
-
 	private UserGroupDto getUserGroup() {
 		return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
-	}
+    }
 
-	private GroupDTO getCreateGroupDto(String group, Set<String> roleIds) {
-		final GroupDTO dto = new GroupDTO();
-		dto.setName(group);
-		dto.setRoleIds(roleIds);
-		dto.setUsers(Collections.singleton(USER));
-		return dto;
-	}
-
-
+    private GroupDTO getCreateGroupDto(String group, Set<String> roleIds) {
+        final GroupDTO dto = new GroupDTO();
+        dto.setName(group);
+        dto.setRoleIds(roleIds);
+        dto.setUsers(Collections.singleton(USER));
+        return dto;
+    }
 }
\ No newline at end of file
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
index 6c0f5be..c335db7 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/UserRoleResourceTest.java
@@ -35,9 +35,12 @@
 import java.util.Collections;
 import java.util.List;
 
-import static java.util.Collections.singleton;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 public class UserRoleResourceTest extends TestBase {
 
@@ -59,7 +62,7 @@
 
 	@Test
 	public void getRoles() {
-		when(rolesService.getUserRoles()).thenReturn(Collections.singletonList(getUserRole()));
+		when(rolesService.getUserRoles(getUserInfo())).thenReturn(Collections.singletonList(getUserRole()));
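+		// getUserRoles now takes the caller's UserInfo as well.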
 
 		final Response response = resources.getJerseyTest()
 				.target("/role")
@@ -74,7 +77,7 @@
 		assertEquals(ROLE_ID, actualRoles.get(0).getId());
 		assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
 
-		verify(rolesService).getUserRoles();
+		verify(rolesService).getUserRoles(getUserInfo());
 		verifyNoMoreInteractions(rolesService);
 	}
 
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
index 83fab66..883630c 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/UserRoleServiceImplTest.java
@@ -19,6 +19,7 @@
 package com.epam.dlab.backendapi.service;
 
 import com.epam.dlab.backendapi.dao.UserRoleDao;
+import com.epam.dlab.backendapi.resources.TestBase;
 import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import org.junit.Rule;
@@ -29,37 +30,24 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
-public class UserRoleServiceImplTest {
+public class UserRoleServiceImplTest extends TestBase {
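+    // Extending TestBase presumably shares the getUserInfo() helper already used by the resource tests.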
 
-	private static final String ROLE_ID = "roleId";
-	@Mock
-	private UserRoleDao dao;
-	@InjectMocks
-	private UserRoleServiceImpl userRoleService;
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
+    private static final String ROLE_ID = "roleId";
+    @Mock
+    private UserRoleDao dao;
+    @InjectMocks
+    private UserRoleServiceImpl userRoleService;
+    @Rule
+    public ExpectedException expectedException = ExpectedException.none();
 
-	@Test
-	public void getUserRoles() {
-		when(dao.findAll()).thenReturn(Collections.singletonList(getUserRole()));
-		final List<UserRoleDto> roles = userRoleService.getUserRoles();
-
-		assertEquals(1, roles.size());
-		assertEquals(ROLE_ID, roles.get(0).getId());
-
-		verify(dao).findAll();
-		verifyNoMoreInteractions(dao);
-	}
-
-
-	@Test
+    @Test
 	public void createRole() {
 
 		userRoleService.createRole(getUserRole());
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java
deleted file mode 100644
index 0c9cf26..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/aws/AwsBillingServiceTest.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.aws;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.aws.AwsBillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.exceptions.DlabException;
-import org.bson.Document;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AwsBillingServiceTest {
-
-	private UserInfo userInfo;
-	private BillingFilter billingFilter;
-	private Document basicDocument;
-
-	@Mock
-	private AwsBillingDAO billingDAO;
-
-	@InjectMocks
-	private AwsBillingService awsBillingService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-		billingFilter = new BillingFilter();
-		basicDocument = getBasicDocument();
-	}
-
-	@Test
-	public void getReportWithTheSameInstanceOfDocument() {
-		Document expectedDocument = new Document();
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(expectedDocument);
-
-		Document actualDocument = awsBillingService.getReport(userInfo, billingFilter);
-		assertEquals(expectedDocument, actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithAnotherInstanceOfDocument() {
-		Document expectedDocument = new Document().append("someField", "someValue");
-		Document anotherDocument = new Document().append("someField", "anotherValue");
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(anotherDocument);
-
-		Document actualDocument = awsBillingService.getReport(userInfo, billingFilter);
-		assertNotEquals(expectedDocument, actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithException() {
-		doThrow(new RuntimeException()).when(billingDAO).getReport(any(UserInfo.class), any(BillingFilter.class));
-
-		try {
-			awsBillingService.getReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot load billing report: null", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReport() {
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		byte[] result = awsBillingService.downloadReport(userInfo, billingFilter);
-		assertNotNull(result);
-		assertTrue(result.length > 0);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWithInapproprietaryDateFormatInDocument() {
-		basicDocument.put("from", "someDateStart");
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		try {
-			awsBillingService.downloadReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot prepare CSV file", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWhenDocumentHasNotAllRequiredFields() {
-		basicDocument.remove("lines");
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		expectedException.expect(NullPointerException.class);
-
-		awsBillingService.downloadReport(userInfo, billingFilter);
-	}
-
-	@Test
-	public void getReportFileName() {
-		String result = awsBillingService.getReportFileName(userInfo, billingFilter);
-		assertEquals("aws-billing-report.csv", result);
-	}
-
-	@Test
-	public void getFirstLine() throws ParseException {
-		String result = awsBillingService.getFirstLine(basicDocument);
-		assertEquals("Service base name: someSBN  Resource tag ID: someTagResourceId  Available reporting " +
-				"period from: Mar 21, 2018 to: Mar 22, 2018", result);
-	}
-
-	@Test
-	public void getFirstLineWithException() throws ParseException {
-		basicDocument.put("from", "someStartDate");
-
-		expectedException.expect(ParseException.class);
-		expectedException.expectMessage("Unparseable date: \"someStartDate\"");
-
-		awsBillingService.getFirstLine(basicDocument);
-
-	}
-
-	@Test
-	public void getHeadersList() {
-		List<String> expectedResult1 =
-				Arrays.asList("USER", "PROJECT", "ENVIRONMENT NAME", "RESOURCE TYPE", "SHAPE", "SERVICE", "SERVICE CHARGES");
-		List<String> expectedResult2 = expectedResult1.subList(1, expectedResult1.size());
-
-		List<String> actualResult1 = awsBillingService.getHeadersList(true);
-		assertEquals(expectedResult1, actualResult1);
-
-		List<String> actualResult2 = awsBillingService.getHeadersList(false);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getLine() {
-		String expectedResult1 = "someUser,someProject,someId,someResType,someShape,someProduct,someCost someCode\n";
-		String actualResult1 = awsBillingService.getLine(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		basicDocument.remove("user");
-		String expectedResult2 = "someProject,someId,someResType,someShape,someProduct,someCost someCode\n";
-		String actualResult2 = awsBillingService.getLine(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getTotal() {
-		String expectedResult1 = ",,,,,,Total: someCostTotal someCode\n";
-		String actualResult1 = awsBillingService.getTotal(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		String expectedResult2 = ",,,,,Total: someCostTotal someCode\n";
-		String actualResult2 = awsBillingService.getTotal(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo("user", "token");
-	}
-
-	private Document getBasicDocument() {
-		return new Document().append("service_base_name", "someSBN").append("user", "someUser")
-				.append("project", "someProject").append("dlab_id", "someId")
-				.append("dlab_resource_type", "someResType").append("tag_resource_id", "someTagResourceId")
-				.append("from", "2018-03-21").append("to", "2018-03-22").append("full_report", false)
-				.append("shape", "someShape").append("product", "someProduct").append("cost", "someCost")
-				.append("cost_total", "someCostTotal").append("currency_code", "someCode")
-				.append("lines", Collections.singletonList(new Document()));
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java
deleted file mode 100644
index ebd4b83..0000000
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/azure/AzureBillingServiceTest.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package com.epam.dlab.backendapi.service.azure;
-
-import com.epam.dlab.auth.UserInfo;
-import com.epam.dlab.backendapi.dao.BillingDAO;
-import com.epam.dlab.backendapi.resources.dto.BillingFilter;
-import com.epam.dlab.exceptions.DlabException;
-import org.bson.Document;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
-
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
-@RunWith(MockitoJUnitRunner.class)
-public class AzureBillingServiceTest {
-
-	private UserInfo userInfo;
-	private BillingFilter billingFilter;
-	private Document basicDocument;
-
-	@Mock
-	private BillingDAO billingDAO;
-
-	@InjectMocks
-	private AzureBillingService azureBillingService;
-
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
-
-	@Before
-	public void setUp() {
-		userInfo = getUserInfo();
-		billingFilter = new BillingFilter();
-		basicDocument = getBasicDocument();
-	}
-
-	@Test
-	public void getReportWithTheSameInstanceOfDocument() {
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(new Document());
-
-		Document actualDocument = azureBillingService.getReport(userInfo, billingFilter);
-		assertEquals(new Document(), actualDocument);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void getReportWithException() {
-		doThrow(new RuntimeException()).when(billingDAO).getReport(any(UserInfo.class), any(BillingFilter.class));
-
-		try {
-			azureBillingService.getReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot load billing report: null", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReport() {
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		byte[] result = azureBillingService.downloadReport(userInfo, billingFilter);
-		assertNotNull(result);
-		assertTrue(result.length > 0);
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWithInapproprietaryDateFormatInDocument() {
-		basicDocument.put("from", "someDateStart");
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		try {
-			azureBillingService.downloadReport(userInfo, billingFilter);
-		} catch (DlabException e) {
-			assertEquals("Cannot prepare CSV file", e.getMessage());
-		}
-
-		verify(billingDAO).getReport(userInfo, billingFilter);
-		verifyNoMoreInteractions(billingDAO);
-	}
-
-	@Test
-	public void downloadReportWhenDocumentHasNotAllRequiredFields() {
-		basicDocument.remove("lines");
-		when(billingDAO.getReport(any(UserInfo.class), any(BillingFilter.class))).thenReturn(basicDocument);
-
-		expectedException.expect(NullPointerException.class);
-
-		azureBillingService.downloadReport(userInfo, billingFilter);
-	}
-
-	@Test
-	public void getReportFileName() {
-		String result = azureBillingService.getReportFileName(userInfo, billingFilter);
-		assertEquals("azure-billing-report.csv", result);
-	}
-
-	@Test
-	public void getFirstLine() throws ParseException {
-		String result = azureBillingService.getFirstLine(basicDocument);
-		assertEquals("Service base name: someSBN  Available reporting period from: Mar 21, 2018 " +
-				"to: Mar 22, 2018", result);
-	}
-
-	@Test
-	public void getFirstLineWithException() throws ParseException {
-		basicDocument.put("from", "someStartDate");
-
-		expectedException.expect(ParseException.class);
-
-		expectedException.expectMessage("Unparseable date: \"someStartDate\"");
-		azureBillingService.getFirstLine(basicDocument);
-	}
-
-	@Test
-	public void getHeadersList() {
-		List<String> expectedResult1 =
-				Arrays.asList("USER", "PROJECT" ,"ENVIRONMENT NAME", "RESOURCE TYPE", "INSTANCE SIZE", "CATEGORY", "SERVICE " +
-						"CHARGES");
-		List<String> expectedResult2 = expectedResult1.subList(1, expectedResult1.size());
-
-		List<String> actualResult1 = azureBillingService.getHeadersList(true);
-		assertEquals(expectedResult1, actualResult1);
-
-		List<String> actualResult2 = azureBillingService.getHeadersList(false);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getLine() {
-		String expectedResult1 = "someUser,someProject,someId,someResType,someSize,someMeterCategory,someCost someCode\n";
-		String actualResult1 = azureBillingService.getLine(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		basicDocument.remove("user");
-		String expectedResult2 = "someProject,someId,someResType,someSize,someMeterCategory,someCost someCode\n";
-		String actualResult2 = azureBillingService.getLine(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	@Test
-	public void getTotal() {
-		String expectedResult1 = ",,,,,,Total: someCost someCode\n";
-		String actualResult1 = azureBillingService.getTotal(true, basicDocument);
-		assertEquals(expectedResult1, actualResult1);
-
-		String expectedResult2 = ",,,,,Total: someCost someCode\n";
-		String actualResult2 = azureBillingService.getTotal(false, basicDocument);
-		assertEquals(expectedResult2, actualResult2);
-	}
-
-	private UserInfo getUserInfo() {
-		return new UserInfo("user", "token");
-	}
-
-	private Document getBasicDocument() {
-		return new Document().append("service_base_name", "someSBN").append("user", "someUser")
-				.append("project", "someProject").append("dlabId", "someId").append("resourceType", "someResType")
-				.append("from", "2018-03-21").append("size", "someSize").append("to", "2018-03-22")
-				.append("full_report", false).append("meterCategory", "someMeterCategory")
-				.append("costString", "someCost").append("currencyCode", "someCode")
-				.append("lines", Collections.singletonList(new Document()));
-	}
-
-}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
index 8331235..74fc7f0 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
@@ -38,10 +38,15 @@
 import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
 import com.epam.dlab.dto.base.computational.ComputationalBase;
 import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStartDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalStopDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
 import com.epam.dlab.rest.client.RESTService;
@@ -62,42 +67,60 @@
 import java.util.List;
 import java.util.Optional;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
 import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 
 @RunWith(MockitoJUnitRunner.class)
 public class ComputationalServiceImplTest {
 
-	private static final long MAX_INACTIVITY = 10L;
-	private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
-	private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
-	private static final String COMP_ID = "compId";
-	private final String USER = "test";
-	private final String TOKEN = "token";
-	private final String EXPLORATORY_NAME = "expName";
-	private final String COMP_NAME = "compName";
-	private final String UUID = "1234-56789765-4321";
-	private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
+    private static final long MAX_INACTIVITY = 10L;
+    private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
+    private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
+    private static final String COMP_ID = "compId";
+    private final String USER = "test";
+    private final String TOKEN = "token";
+    private final String EXPLORATORY_NAME = "expName";
+    private final String PROJECT = "project";
+    private final String COMP_NAME = "compName";
+    private final String UUID = "1234-56789765-4321";
+    private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
 
-	private UserInfo userInfo;
-	private List<ComputationalCreateFormDTO> formList;
-	private UserInstanceDTO userInstance;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
-	private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
-	private SparkStandaloneClusterResource sparkClusterResource;
-	private UserComputationalResource ucResource;
+    private UserInfo userInfo;
+    private List<ComputationalCreateFormDTO> formList;
+    private UserInstanceDTO userInstance;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
+    private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
+    private SparkStandaloneClusterResource sparkClusterResource;
+    private UserComputationalResource ucResource;
 
-	@Mock
-	private ProjectService projectService;
-	@Mock
-	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private ComputationalDAO computationalDAO;
+    @Mock
+    private ProjectService projectService;
+    @Mock
+    private ExploratoryDAO exploratoryDAO;
+    @Mock
+    private ComputationalDAO computationalDAO;
 	@Mock
 	private RESTService provisioningService;
 	@Mock
@@ -132,149 +155,148 @@
 
 	@Test
 	public void createSparkCluster() {
-		ProjectDTO projectDTO = getProjectDTO();
-		when(projectService.get(anyString())).thenReturn(projectDTO);
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
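+        // addComputational and fetchExploratoryFields now take the project name as an extra argument.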
 
-		ComputationalBase compBaseMocked = mock(ComputationalBase.class);
-		when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
-				any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class)))
-				.thenReturn(compBaseMocked);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+        when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class)))
+                .thenReturn(compBaseMocked);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		boolean creationResult =
-				computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		assertTrue(creationResult);
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        boolean creationResult =
+                computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        assertTrue(creationResult);
 
-		verify(projectService).get("");
-		verify(computationalDAO)
-				.addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
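+        // The verifications above pin the explicit PROJECT constant instead of the former empty string.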
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(sparkClusterCreateForm), refEq(endpointDTO()));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(sparkClusterCreateForm), refEq(endpointDTO()));
 
-		verify(provisioningService)
-				.post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
-						String.class);
+        verify(provisioningService)
+                .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
+                        String.class);
 
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(projectService, configuration, computationalDAO, requestBuilder, provisioningService, requestId);
-	}
-	@Test
-	public void createSparkClusterWhenResourceAlreadyExists() {
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(false);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(projectService, configuration, computationalDAO, requestBuilder, provisioningService, requestId);
+    }
+
+    @Test
+    public void createSparkClusterWhenResourceAlreadyExists() {
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(false);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
 
-		boolean creationResult =
-				computationalService.createSparkCluster(userInfo, (SparkStandaloneClusterCreateForm) formList.get(0),
-						"");
-		assertFalse(creationResult);
+        boolean creationResult = computationalService
+                .createSparkCluster(userInfo, (SparkStandaloneClusterCreateForm) formList.get(0), PROJECT);
+        assertFalse(creationResult);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
-		verifyNoMoreInteractions(configuration, computationalDAO);
-	}
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
+        verifyNoMoreInteractions(configuration, computationalDAO);
+    }
 
 	@Test
 	public void createSparkClusterWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		try {
-			computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Exploratory for user with name not found", e.getMessage());
-		}
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        try {
+            computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Exploratory for user with name not found", e.getMessage());
+        }
 
-		verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
-		verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
-				"self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+        verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+                "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
+    }
 
 	@Test
 	public void createSparkClusterWhenMethodNewComputationalCreateThrowsException() {
-		ProjectDTO projectDTO = getProjectDTO();
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(projectService.get(anyString())).thenReturn(projectDTO);
-		when(computationalDAO.addComputational(anyString(), anyString(),
-				any(SparkStandaloneClusterResource.class))).thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+                any(SparkStandaloneClusterResource.class))).thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
-				any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
-		try {
-			computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
-		} catch (DlabException e) {
-			assertEquals("Cannot create instance of resource class ", e.getMessage());
-		}
-		verify(projectService).get("");
-		verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(userInfo, projectDTO, userInstance, sparkClusterCreateForm, endpointDTO());
-		verifyNoMoreInteractions(projectService, configuration, computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+        try {
+            computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Cannot create instance of resource class ", e.getMessage());
+        }
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newComputationalCreate(userInfo, projectDTO, userInstance, sparkClusterCreateForm, endpointDTO());
+        verifyNoMoreInteractions(projectService, configuration, computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void terminateComputationalEnvironment() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		String explId = "explId";
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
+        String explId = "explId";
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		String compId = "compId";
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName(COMP_NAME);
-		ucResource.setImageName("dataengine-service");
-		ucResource.setComputationalId(compId);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        String compId = "compId";
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName(COMP_NAME);
+        ucResource.setImageName("dataengine-service");
+        ucResource.setComputationalId(compId);
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
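+        // fetchComputationalFields likewise gained a project parameter.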
 
-		ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
-		ctDto.setComputationalName(COMP_NAME);
-		ctDto.setExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), any(EndpointDTO.class))).thenReturn(ctDto);
+        ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
+        ctDto.setComputationalName(COMP_NAME);
+        ctDto.setExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), any(EndpointDTO.class))).thenReturn(ctDto);
 
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
+        computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
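+        // terminateComputational now takes the project name as its second argument.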
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
+        verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
 
-		verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
-				String.class);
+        verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
+                String.class);
 
-		verify(requestId).put(USER, UUID);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(requestId).put(USER, UUID);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+    }
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodUpdateComputationalStatusThrowsException() {
@@ -285,11 +307,11 @@
 		when(computationalDAO.updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self")))
 				.thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Could not update computational resource status", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Could not update computational resource status", e.getMessage());
+        }
 
 		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
 		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
@@ -298,400 +320,384 @@
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodFetchComputationalFieldsThrowsException() {
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		String explId = "explId";
-		when(exploratoryDAO.fetchExploratoryId(anyString(), anyString())).thenReturn(explId);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		doThrow(new DlabException("Computational resource for user with exploratory name not found."))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        doThrow(new DlabException("Computational resource for user with exploratory name not found."))
+                .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
+        }
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+    }
 
 	@Test
 	public void terminateComputationalEnvironmentWhenMethodNewComputationalTerminateThrowsException() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		String compId = "compId";
-		UserComputationalResource ucResource = new UserComputationalResource();
-		ucResource.setComputationalName(COMP_NAME);
-		ucResource.setImageName("dataengine-service");
-		ucResource.setComputationalId(compId);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        String compId = "compId";
+        UserComputationalResource ucResource = new UserComputationalResource();
+        ucResource.setComputationalName(COMP_NAME);
+        ucResource.setImageName("dataengine-service");
+        ucResource.setComputationalId(compId);
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), any(EndpointDTO.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
-		} catch (DlabException e) {
-			assertEquals("Cannot create instance of resource class ", e.getMessage());
-		}
+        try {
+            computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        } catch (DlabException e) {
+            assertEquals("Cannot create instance of resource class ", e.getMessage());
+        }
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void createDataEngineService() {
-		ProjectDTO projectDTO = getProjectDTO();
-		when(projectService.get(anyString())).thenReturn(projectDTO);
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ComputationalBase compBaseMocked = mock(ComputationalBase.class);
-		when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
-				any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class)))
-				.thenReturn(compBaseMocked);
+        ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+        when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class)))
+                .thenReturn(compBaseMocked);
 
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		boolean creationResult =
-				computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
-		assertTrue(creationResult);
+        boolean creationResult =
+                computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+        assertTrue(creationResult);
 
-		verify(projectService).get("");
+        verify(projectService).get(PROJECT);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(projectDTO), refEq(userInstance), any(ComputationalCreateFormDTO.class), refEq(endpointDTO()));
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), any(ComputationalCreateFormDTO.class), refEq(endpointDTO()));
 
-		verify(provisioningService)
-				.post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
-						compBaseMocked, String.class);
+        verify(provisioningService)
+                .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
+                        compBaseMocked, String.class);
 
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+    }
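
The changes above all follow one refactor: a project argument threaded into the DAO signatures. A sketch of the shapes the stubs and verifications imply (parameter names are assumptions; only the arities and types are confirmed by the calls):

    // Assumed post-refactor signatures, inferred from the call sites in this test:
    interface ComputationalDAOSketch {
        boolean addComputational(String user, String exploratoryName, String project,
                                 UserComputationalResource resource);
    }

    interface ExploratoryDAOSketch {
        UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName);
    }

Note the argument order differs between the two (exploratoryName before project in addComputational, after it in fetchExploratoryFields), which the eq(...) verifications above make explicit.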
 
 	@Test
 	public void createDataEngineServiceWhenComputationalResourceNotAdded() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(false);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+        when(computationalDAO.addComputational(anyString(), anyString(), any(), any(UserComputationalResource.class)))
+                .thenReturn(false);
 
-		boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
-				"");
-		assertFalse(creationResult);
+        boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
+                PROJECT);
+        assertFalse(creationResult);
 
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+        verifyNoMoreInteractions(computationalDAO);
+    }
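
What this test pins down: when the DAO refuses to add the resource, the service must return false and touch nothing else. A hedged sketch of the guard being exercised (not the actual implementation):

    // Early-return path implied by verifyNoMoreInteractions(computationalDAO):
    if (!computationalDAO.addComputational(user, exploratoryName, project, resource)) {
        return false;   // no provisioning request is sent on this path
    }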
 
 	@Test
 	public void createDataEngineServiceWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+        when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		try {
-			computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
-		} catch (DlabException e) {
-			assertEquals("Exploratory for user with name not found", e.getMessage());
-		}
+        try {
+            computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Exploratory for user with name not found", e.getMessage());
+        }
 
-		verify(computationalDAO, never())
-				.addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+        verify(computationalDAO, never())
+                .addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
-				"self"));
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
-	}
+        verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+                "self"));
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+    }
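
doThrow(...).when(mock).call(...) is used here instead of when(...).thenThrow(...); for a method with a return value the two are interchangeable, and doThrow is the only form that works for void methods. The same stub, both ways:

    // Equivalent stubbing styles for a throwing, value-returning mock call:
    doThrow(new ResourceNotFoundException("..."))
            .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
    // when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
    //         .thenThrow(new ResourceNotFoundException("..."));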
 
 	@Test
 	public void createDataEngineServiceWhenMethodNewComputationalCreateThrowsException() {
-		ProjectDTO projectDTO = getProjectDTO();
-		when(projectService.get(anyString())).thenReturn(projectDTO);
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
-				.thenReturn(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        ProjectDTO projectDTO = getProjectDTO();
+        when(projectService.get(anyString())).thenReturn(projectDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(computationalDAO.addComputational(anyString(), anyString(), any(), any(UserComputationalResource.class)))
+                .thenReturn(true);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		doThrow(new DlabException("Cannot create instance of resource class "))
-				.when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
-				any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class));
+        doThrow(new DlabException("Cannot create instance of resource class "))
+                .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+                any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class));
 
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
-		try {
-			computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, "");
-		} catch (DlabException e) {
-			assertEquals("Could not send request for creation the computational resource compName: " +
-					"Cannot create instance of resource class ", e.getMessage());
-		}
+        ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
+        try {
+            computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Could not send request for creation the computational resource compName: " +
+                    "Cannot create instance of resource class ", e.getMessage());
+        }
 
-		verify(projectService).get("");
-		verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newComputationalCreate(
-				refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(computationalCreateFormDTO), refEq(endpointDTO()));
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+        verify(projectService).get(PROJECT);
+        verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newComputationalCreate(
+                refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(computationalCreateFormDTO), refEq(endpointDTO()));
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
 
-		verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder);
-	}
+        verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder);
+    }
 
 	@Test
 	public void stopSparkCluster() {
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(exploratory);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
-		when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
-				any(EndpointDTO.class))).thenReturn(computationalStopDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
-				.thenReturn("someUuid");
-		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+        ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
+        when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+                any(EndpointDTO.class))).thenReturn(computationalStopDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+                .thenReturn("someUuid");
+        when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 
-		computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
+        computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
-		verify(provisioningService)
-				.post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
-						eq(String.class));
-		verify(requestId).put(USER, "someUuid");
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
-				provisioningService, requestId);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+        verify(provisioningService)
+                .post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
+                        eq(String.class));
+        verify(requestId).put(USER, "someUuid");
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+                provisioningService, requestId);
+    }
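
The four-argument fetchExploratoryFields overload used here takes a trailing boolean. Judging by the tests that read exploratory.getResources() afterwards, it presumably controls whether computational resources are loaded along with the exploratory; that is an assumption, since this diff only shows call sites.

    // Assumed meaning of the trailing flag (not confirmed by this diff):
    UserInstanceDTO exploratory =
            exploratoryDAO.fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME,
                    true /* include computational resources */);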
 
 	@Test
 	public void stopSparkClusterWhenDataengineTypeIsAnother() {
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		expectedException.expect(IllegalStateException.class);
-		expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(exploratory);
+        expectedException.expect(IllegalStateException.class);
+        expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
 
-		computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
-	}
+        computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+    }
 
 	@Test
 	public void startSparkCluster() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		final UserInstanceDTO exploratory = getUserInstanceDto();
-		exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
-		when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        final UserInstanceDTO exploratory = getUserInstanceDto();
+        exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(exploratory);
+        when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+                .thenReturn(mock(UpdateResult.class));
 
-		ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
-		when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
-				any(EndpointDTO.class))).thenReturn(computationalStartDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
-				.thenReturn("someUuid");
-		when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+        ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
+        when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+                any(EndpointDTO.class))).thenReturn(computationalStartDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+                .thenReturn("someUuid");
+        when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
 
-		computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
+        computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
 
-		verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
-		verify(provisioningService)
-				.post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
-						refEq(computationalStartDTO),
-						eq(String.class));
-		verify(requestId).put(USER, "someUuid");
-		verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
-				provisioningService, requestId);
-	}
+        verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+        verify(provisioningService)
+                .post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
+                        refEq(computationalStartDTO),
+                        eq(String.class));
+        verify(requestId).put(USER, "someUuid");
+        verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+                provisioningService, requestId);
+    }
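
Worth noting when reading these two tests side by side: as updated here, the project argument sits in different positions in the paired signatures, so the calls are easy to transpose:

    // Argument order as exercised above (taken directly from the calls in this diff):
    computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
    computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);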
 
 	@Test
 	public void startSparkClusterWhenDataengineStatusIsRunning() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
-				DOCKER_DLAB_DATAENGINE_SERVICE)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
+                DOCKER_DLAB_DATAENGINE_SERVICE)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(userInstanceDto);
 
-		expectedException.expect(IllegalStateException.class);
-		expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
+        expectedException.expect(IllegalStateException.class);
+        expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
 
-		computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void updateComputationalsReuploadKeyFlag() {
-		doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResources(anyString(), any(List.class),
-				any(List.class), anyBoolean(), anyVararg());
-
-		computationalService.updateComputationalsReuploadKeyFlag(USER, singletonList(RUNNING),
-				singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
-
-		verify(computationalDAO).updateReuploadKeyFlagForComputationalResources(USER, singletonList
-						(RUNNING),
-				singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
+    }
 
 	@Test
 	public void getComputationalResource() {
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+        when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(ucResource);
 
-		Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
-		Optional<UserComputationalResource> actualResource =
-				computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
-		assertEquals(expectedResource, actualResource);
+        Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
+        Optional<UserComputationalResource> actualResource =
+                computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        assertEquals(expectedResource, actualResource);
 
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verifyNoMoreInteractions(computationalDAO);
+    }
 
 	@Test
 	public void getComputationalResourceWithException() {
-		doThrow(new DlabException("Computational resource not found"))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+        doThrow(new DlabException("Computational resource not found"))
+                .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
 
-		Optional<UserComputationalResource> expectedResource = Optional.empty();
-		Optional<UserComputationalResource> actualResource =
-				computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
-		assertEquals(expectedResource, actualResource);
+        Optional<UserComputationalResource> expectedResource = Optional.empty();
+        Optional<UserComputationalResource> actualResource =
+                computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        assertEquals(expectedResource, actualResource);
 
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
-		verifyNoMoreInteractions(computationalDAO);
-	}
+        verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+        verifyNoMoreInteractions(computationalDAO);
+    }
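
Taken together, this test and the one below pin down an Optional-wrapping contract: a successful DAO fetch is wrapped in Optional.of, and a DlabException is swallowed into Optional.empty(). A hedged sketch of a service method that would satisfy both (the real implementation may differ):

    // Sketch only — signature inferred from the calls, body from the two assertions:
    public Optional<UserComputationalResource> getComputationalResource(
            String user, String project, String exploratoryName, String computationalName) {
        try {
            return Optional.of(computationalDAO.fetchComputationalFields(
                    user, project, exploratoryName, computationalName));
        } catch (DlabException e) {
            return Optional.empty();
        }
    }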
 
 	@Test
 	public void testUpdateSparkClusterConfig() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(ClusterConfig.class), any(EndpointDTO.class)))
-				.thenReturn(clusterConfigDTO);
-		when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
-				.thenReturn("someUuid");
-		computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
-				config);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(userInstanceDto);
+        when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(ClusterConfig.class), any(EndpointDTO.class)))
+                .thenReturn(clusterConfigDTO);
+        when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
+                .thenReturn("someUuid");
+        computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                COMP_NAME, config);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
-				refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
-				eq(Collections.singletonList(new ClusterConfig())), eq(endpointDTO()));
-		verify(requestId).put(USER, "someUuid");
-		verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
-				.withConfig(config)
-				.withUser(USER)
-				.withExploratoryName(EXPLORATORY_NAME)
-				.withComputationalName(COMP_NAME)
-				.withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
-				eq(getUserInfo().getAccessToken()),
-				refEq(new ComputationalClusterConfigDTO()), eq(String.class));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
+                refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
+                eq(Collections.singletonList(new ClusterConfig())), eq(endpointDTO()));
+        verify(requestId).put(USER, "someUuid");
+        verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
+                .withProject(PROJECT)
+                .withConfig(config)
+                .withUser(USER)
+                .withExploratoryName(EXPLORATORY_NAME)
+                .withComputationalName(COMP_NAME)
+                .withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
+                eq(getUserInfo().getAccessToken()),
+                refEq(new ComputationalClusterConfigDTO()), eq(String.class));
 
-	}
+    }
 
 	@Test
 	public void testUpdateSparkClusterConfigWhenClusterIsNotRunning() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		try {
-			computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
-					config);
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Running computational resource with name compName for exploratory expName not found",
-					e.getMessage());
-		}
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(userInstanceDto);
+        try {
+            computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                    COMP_NAME, config);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Running computational resource with name compName for exploratory expName not found",
+                    e.getMessage());
+        }
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verifyNoMoreInteractions(exploratoryDAO);
+        verifyZeroInteractions(provisioningService, requestBuilder, requestId);
 
-	}
+    }
 
 	@Test
 	public void testUpdateSparkClusterConfigWhenClusterIsNotFound() {
-		final UserInstanceDTO userInstanceDto = getUserInstanceDto();
-		final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
-		userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
-		try {
-			computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME + "X",
-					config);
-		} catch (ResourceNotFoundException e) {
-			assertEquals("Running computational resource with name compNameX for exploratory expName not found",
-					e.getMessage());
-		}
+        final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+        final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+        userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean()))
+                .thenReturn(userInstanceDto);
+        try {
+            computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                    COMP_NAME + "X", config);
+        } catch (ResourceNotFoundException e) {
+            assertEquals("Running computational resource with name compNameX for exploratory expName not found",
+                    e.getMessage());
+        }
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
-		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+        verifyNoMoreInteractions(exploratoryDAO);
+        verifyZeroInteractions(provisioningService, requestBuilder, requestId);
 
-	}
+    }
 
 	@Test
 	public void testGetClusterConfig() {
-		when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+        when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString()))
+                .thenReturn(Collections.singletonList(getClusterConfig()));
 
-		final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(),
-				EXPLORATORY_NAME, COMP_NAME);
-		final ClusterConfig config = clusterConfig.get(0);
+        final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(), PROJECT,
+                EXPLORATORY_NAME, COMP_NAME);
+        final ClusterConfig config = clusterConfig.get(0);
 
-		assertEquals(1, clusterConfig.size());
-		assertEquals("test", config.getClassification());
-		assertNull(config.getConfigurations());
-		assertNull(config.getProperties());
-	}
+        assertEquals(1, clusterConfig.size());
+        assertEquals("test", config.getClassification());
+        assertNull(config.getConfigurations());
+        assertNull(config.getProperties());
+    }
 
 
 	@Test
 	public void testGetClusterConfigWithException() {
-		when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException(
-				"Exception"));
+        when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString()))
+                .thenThrow(new RuntimeException("Exception"));
 
-		expectedException.expectMessage("Exception");
-		expectedException.expect(RuntimeException.class);
-		computationalService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME);
-	}
+        expectedException.expectMessage("Exception");
+        expectedException.expect(RuntimeException.class);
+        computationalService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, COMP_NAME);
+    }
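
A reminder on the ExpectedException rule used here: the expect/expectMessage calls must run before the statement that throws, and nothing after that statement executes, so any follow-up verification would have to live in the expectation itself:

    // Expectations first, throwing call last — the test method effectively ends at the throw:
    expectedException.expect(RuntimeException.class);
    expectedException.expectMessage("Exception");
    computationalService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, COMP_NAME);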
 
 	private ClusterConfig getClusterConfig() {
 		final ClusterConfig config = new ClusterConfig();
@@ -705,26 +711,29 @@
 
 	private UserInstanceDTO getUserInstanceDto() {
 		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withExploratoryId("explId")
+                .withExploratoryId("explId")
+                .withProject(PROJECT)
 				.withTags(Collections.emptyMap());
 	}
 
 	private List<ComputationalCreateFormDTO> getFormList() {
-		SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
-		sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
-		sparkClusterForm.setName(COMP_NAME);
-		sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
-		sparkClusterForm.setImage("dataengine");
-		ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
-		desClusterForm.setNotebookName(EXPLORATORY_NAME);
-		desClusterForm.setName(COMP_NAME);
+        SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
+        sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
+        sparkClusterForm.setName(COMP_NAME);
+        sparkClusterForm.setProject(PROJECT);
+        sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
+        sparkClusterForm.setImage("dataengine");
+        ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
+        desClusterForm.setNotebookName(EXPLORATORY_NAME);
+        desClusterForm.setName(COMP_NAME);
 
-		return Arrays.asList(sparkClusterForm, desClusterForm);
-	}
+        return Arrays.asList(sparkClusterForm, desClusterForm);
+    }
 
 	private ComputationalStatusDTO getComputationalStatusDTOWithStatus(String status) {
 		return new ComputationalStatusDTO()
-				.withUser(USER)
+                .withUser(USER)
+                .withProject(PROJECT)
 				.withExploratoryName(EXPLORATORY_NAME)
 				.withComputationalName(COMP_NAME)
 				.withStatus(UserInstanceStatus.of(status));
@@ -762,8 +771,8 @@
 	}
 
 	private ProjectDTO getProjectDTO() {
-		return new ProjectDTO("project", Collections.emptySet(), "", "", null,
-				singletonList(new ProjectEndpointDTO("endpoint", UserInstanceStatus.RUNNING,
-						new EdgeInfo())), true);
-	}
+        return new ProjectDTO(PROJECT, Collections.emptySet(), "", "", null,
+                singletonList(new ProjectEndpointDTO("endpoint", UserInstanceStatus.RUNNING,
+                        new EdgeInfo())), true);
+    }
 }
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
index 5396ca8..460c2f9 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
@@ -34,7 +34,6 @@
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.base.edge.EdgeInfo;
 import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.exceptions.ResourceConflictException;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -43,11 +42,24 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class EnvironmentServiceImplTest {
@@ -57,8 +69,6 @@
 	private static final String EXPLORATORY_NAME_2 = "expName2";
 	private static final String TOKEN = "token";
 	private static final String UUID = "213-12312-321";
-	private static final String RUNNING_STATE = "running";
-	private static final String STOPPED_STATE = "stopped";
 	private static final String PROJECT_NAME = "projectName";
 	private static final String ENDPOINT_NAME = "endpointName";
 	private static final String ADMIN = "admin";
@@ -112,73 +122,6 @@
 		environmentService.getUsers();
 	}
 
-	@Test
-	public void getAllUsers() {
-		doReturn(Collections.singleton(USER)).when(envDAO).fetchAllUsers();
-		final Set<String> users = environmentService.getUserNames();
-
-		assertEquals(1, users.size());
-		assertTrue(users.contains(USER));
-
-		verify(envDAO).fetchAllUsers();
-		verifyNoMoreInteractions(envDAO);
-	}
-
-	@Test
-	public void getAllUsersWithException() {
-		doThrow(new DlabException("Users not found")).when(envDAO).fetchAllUsers();
-
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Users not found");
-
-		environmentService.getUserNames();
-	}
-
-
-	@Test
-	public void stopEnvironment() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
-
-		environmentService.stopEnvironment(userInfo, USER);
-
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(exploratoryDAO, exploratoryService);
-	}
-
-	@Test
-	@SuppressWarnings("unchecked")
-	public void stopEnvironmentWithWrongResourceState() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), any(List.class), anyVararg()))
-				.thenReturn(getUserInstances());
-		expectedException.expect(ResourceConflictException.class);
-
-		environmentService.stopEnvironment(getUserInfo(), USER);
-	}
-
-	@Test
-	public void stopEnvironmentWithoutEdge() {
-		final UserInfo userInfo = getUserInfo();
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
-
-		environmentService.stopEnvironment(userInfo, USER);
-
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
-				UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
-				UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
-		verifyNoMoreInteractions(envDAO, exploratoryDAO, exploratoryService);
-	}
 
 	@Test
 	public void stopProjectEnvironment() {
@@ -186,15 +129,15 @@
 		final ProjectDTO projectDTO = getProjectDTO();
 		when(exploratoryDAO.fetchRunningExploratoryFieldsForProject(anyString())).thenReturn(getUserInstances());
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(userInfo);
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 		when(projectService.get(anyString())).thenReturn(projectDTO);
 		doNothing().when(projectService).stop(any(UserInfo.class), anyString(), anyString());
 
 		environmentService.stopProjectEnvironment(PROJECT_NAME);
 
 		verify(exploratoryDAO).fetchRunningExploratoryFieldsForProject(PROJECT_NAME);
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_2));
 		verify(securityService, times(2)).getServiceAccountInfo(USER);
 		verify(securityService).getServiceAccountInfo(ADMIN);
 		verify(projectService).get(eq(PROJECT_NAME));
@@ -208,33 +151,33 @@
 	@Test
 	public void stopExploratory() {
 		final UserInfo userInfo = getUserInfo();
-		when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 
-		environmentService.stopExploratory(new UserInfo(USER, TOKEN), USER, EXPLORATORY_NAME_1);
+		environmentService.stopExploratory(new UserInfo(USER, TOKEN), USER, PROJECT_NAME, EXPLORATORY_NAME_1);
 
-		verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
 		verifyNoMoreInteractions(securityService, exploratoryService);
 	}
 
 	@Test
 	public void stopComputational() {
 		final UserInfo userInfo = getUserInfo();
-		doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString());
+		doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString(), anyString());
 
-		environmentService.stopComputational(userInfo, USER, EXPLORATORY_NAME_1, "compName");
+		environmentService.stopComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
 
-		verify(computationalService).stopSparkCluster(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+		verify(computationalService).stopSparkCluster(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
 		verifyNoMoreInteractions(securityService, computationalService);
 	}
 
 	@Test
 	public void terminateExploratory() {
 		final UserInfo userInfo = getUserInfo();
-		when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
+		when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
 
-		environmentService.terminateExploratory(userInfo, USER, EXPLORATORY_NAME_1);
+		environmentService.terminateExploratory(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1);
 
-		verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+		verify(exploratoryService).terminate(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
 		verifyNoMoreInteractions(securityService, exploratoryService);
 	}
 
@@ -242,12 +185,12 @@
 	public void terminateComputational() {
 		final UserInfo userInfo = getUserInfo();
 		doNothing().when(computationalService)
-				.terminateComputational(any(UserInfo.class), anyString(), anyString());
+				.terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
 
-		environmentService.terminateComputational(userInfo, USER, EXPLORATORY_NAME_1, "compName");
+		environmentService.terminateComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
 
 		verify(computationalService)
-				.terminateComputational(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+				.terminateComputational(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
 		verifyNoMoreInteractions(securityService, computationalService);
 	}
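
These EnvironmentService tests all check plain delegation with the project threaded through. A hedged sketch of what terminateComputational presumably looks like after this change (body assumed, not copied from DLAB; the separate user argument is apparently not forwarded here, judging by the verification):

    // Sketch inferred from the verify(...) above:
    public void terminateComputational(UserInfo userInfo, String user, String project,
                                       String exploratoryName, String computationalName) {
        computationalService.terminateComputational(userInfo, project, exploratoryName, computationalName);
    }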
 
@@ -257,8 +200,8 @@
 
 	private List<UserInstanceDTO> getUserInstances() {
 		return Arrays.asList(
-				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject("prj"),
-				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject("prj"));
+				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject(PROJECT_NAME),
+				new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject(PROJECT_NAME));
 	}
 
 	private ProjectDTO getProjectDTO() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
index e7d718d..5d21167 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
@@ -66,7 +66,6 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyListOf;
 import static org.mockito.Mockito.anyMapOf;
 import static org.mockito.Mockito.anyString;
@@ -85,26 +84,27 @@
 @RunWith(MockitoJUnitRunner.class)
 public class ExploratoryServiceImplTest {
 
-	private final String USER = "test";
-	private final String TOKEN = "token";
-	private final String EXPLORATORY_NAME = "expName";
-	private final String UUID = "1234-56789765-4321";
-	private static final String ENDPOINT_NAME = "endpointName";
+    private final String USER = "test";
+    private final String TOKEN = "token";
+    private final String PROJECT = "project";
+    private final String EXPLORATORY_NAME = "expName";
+    private final String UUID = "1234-56789765-4321";
+    private static final String ENDPOINT_NAME = "endpointName";
 
 
-	private UserInfo userInfo;
-	private UserInstanceDTO userInstance;
-	private StatusEnvBaseDTO statusEnvBaseDTO;
+    private UserInfo userInfo;
+    private UserInstanceDTO userInstance;
+    private StatusEnvBaseDTO statusEnvBaseDTO;
 
-	@Mock
-	private ProjectService projectService;
-	@Mock
-	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private ComputationalDAO computationalDAO;
-	@Mock
-	private GitCredsDAO gitCredsDAO;
-	@Mock
+    @Mock
+    private ProjectService projectService;
+    @Mock
+    private ExploratoryDAO exploratoryDAO;
+    @Mock
+    private ComputationalDAO computationalDAO;
+    @Mock
+    private GitCredsDAO gitCredsDAO;
+    @Mock
 	private RESTService provisioningService;
 	@Mock
 	private RequestBuilder requestBuilder;
@@ -129,162 +129,162 @@
 
 	@Test
 	public void start() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
-		when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
+        ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
+        when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
 
-		ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
-		egcuDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
-				any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
+        ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
+        egcuDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
 
-		String exploratoryStart = "exploratory/start";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
-				.thenReturn(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryStart = "exploratory/start";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+    }
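
The closing steps of this test show the correlation bookkeeping that recurs across these service tests: every provisioning POST returns a UUID, which is stored against the user via requestId.put so the asynchronous response can later be matched back to its request. In sketch form (using the names from this test):

    // Correlation pattern pinned down by verify(requestId).put(USER, UUID):
    String uuid = provisioningService.post(endpointDTO().getUrl() + "exploratory/start",
            TOKEN, egcuDto, String.class);
    requestId.put(USER, uuid);   // lets the response handler find the originating request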
 
 	@Test
 	public void startWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
-		try {
-			exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
-		} catch (DlabException e) {
-			assertEquals("Could not start exploratory environment expName: Exploratory for user with " +
-					"name not found", e.getMessage());
-		}
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+        try {
+            exploratoryService.start(userInfo, EXPLORATORY_NAME, PROJECT);
+        } catch (DlabException e) {
+            assertEquals("Could not start exploratory environment expName: Exploratory for user with " +
+                    "name not found", e.getMessage());
+        }
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void stop() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
-		eaDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
-				.thenReturn(eaDto);
+        ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+        eaDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+                .thenReturn(eaDto);
 
-		String exploratoryStop = "exploratory/stop";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
-				(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryStop = "exploratory/stop";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.stop(userInfo, EXPLORATORY_NAME);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
-		verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), EXPLORATORY_NAME,
-				UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
+        verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), PROJECT,
+                EXPLORATORY_NAME, UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING,
+                UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+    }
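
The updateComputationalStatusesForExploratory verification encodes the cascade this test is really about: stopping an exploratory also flips its computational resources. From the argument pattern alone, the first status is presumably the one being set and the trailing statuses those to leave untouched; that reading is an assumption, since this diff only shows the call site.

    // Call shape as verified above (semantics of the trailing statuses are assumed):
    computationalDAO.updateComputationalStatusesForExploratory(userInfo.getName(), PROJECT,
            EXPLORATORY_NAME, UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING,
            UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);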
 
 	@Test
 	public void stopWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
-		try {
-			exploratoryService.stop(userInfo, EXPLORATORY_NAME);
-		} catch (DlabException e) {
-			assertEquals("Could not stop exploratory environment expName: Exploratory for user with " +
-					"name not found", e.getMessage());
-		}
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+        try {
+            exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+        } catch (DlabException e) {
+            assertEquals("Could not stop exploratory environment expName: Exploratory for user with " +
+                    "name not found", e.getMessage());
+        }
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void terminate() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
-		eaDto.withExploratoryName(EXPLORATORY_NAME);
-		when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
-				.thenReturn(eaDto);
+        ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+        eaDto.withExploratoryName(EXPLORATORY_NAME);
+        when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+                .thenReturn(eaDto);
 
-		String exploratoryTerminate = "exploratory/terminate";
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
-				(UUID);
-		when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+        String exploratoryTerminate = "exploratory/terminate";
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+                .thenReturn(UUID);
+        when(requestId.put(anyString(), anyString())).thenReturn(UUID);
 
-		String uuid = exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
-		assertNotNull(uuid);
-		assertEquals(UUID, uuid);
+        String uuid = exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(uuid);
+        assertEquals(UUID, uuid);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
 
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
-						.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.FAILED);
-		verify(requestBuilder).newExploratoryStop(userInfo, userInstance, endpointDTO());
-		verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
-		verify(requestId).put(USER, UUID);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
-	}
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT, EXPLORATORY_NAME,
+                UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
+                UserInstanceStatus.FAILED);
+        verify(requestBuilder).newExploratoryStop(userInfo, userInstance, endpointDTO());
+        verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
+        verify(requestId).put(USER, UUID);
+        verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
+    }
 
 	@Test
 	public void terminateWhenMethodFetchExploratoryFieldsThrowsException() {
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
-		try {
-			exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
-		} catch (DlabException e) {
-			assertEquals("Could not terminate exploratory environment expName: Exploratory for user " +
-					"with name not found", e.getMessage());
-		}
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+        try {
+            exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
+        } catch (DlabException e) {
+            assertEquals("Could not terminate exploratory environment expName: Exploratory for user " +
+                    "with name not found", e.getMessage());
+        }
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void create() {
@@ -389,202 +389,116 @@
 	}
 
 	@Test
-	public void updateExploratoryStatusesWithRunningStatus() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.RUNNING);
-
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("running");
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
-
-	@Test
-	public void updateExploratoryStatusesWithStoppingStatus() {
-		userInstance.setStatus("stopping");
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
-				any(UserInstanceStatus.class), any(UserInstanceStatus.class));
-
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.STOPPING);
-
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME,
-				UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
-	public void updateExploratoryStatusesWithTerminatingStatus() {
-		userInstance.setStatus("terminating");
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
-				.thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class)))
-				.thenReturn(10);
-
-		exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.TERMINATING);
-
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
-				.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED, UserInstanceStatus
-				.FAILED);
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
 	public void updateProjectExploratoryStatuses() {
-		when(exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(anyString(), anyString(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
-		doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
-				any(UserInstanceStatus.class), any(UserInstanceStatus.class), anyVararg());
+        when(exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(anyString(), anyString(), anyVararg()))
+                .thenReturn(singletonList(userInstance));
+        when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+        doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
+                anyString(), any(UserInstanceStatus.class), any(UserInstanceStatus.class), anyVararg());
 
-		exploratoryService.updateProjectExploratoryStatuses("project", "endpoint",
-				UserInstanceStatus.TERMINATED);
-		statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminated");
+        exploratoryService.updateProjectExploratoryStatuses("project", "endpoint",
+                UserInstanceStatus.TERMINATED);
+        statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminated");
 
-		verify(exploratoryDAO).fetchProjectExploratoriesWhereStatusNotIn("project", "endpoint",
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
-		verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
-		verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME,
-				UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED,
-				UserInstanceStatus.FAILED);
+        verify(exploratoryDAO).fetchProjectExploratoriesWhereStatusNotIn("project", "endpoint",
+                UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
+        verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+        verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT,
+                EXPLORATORY_NAME, UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED,
+                UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
 
-		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
-	}
-
-	@Test
-	public void updateUserExploratoriesReuploadKeyFlag() {
-		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratories(anyString(), anyBoolean(),
-				any(UserInstanceStatus.class));
-
-		exploratoryService.updateExploratoriesReuploadKeyFlag(USER, true, UserInstanceStatus.RUNNING);
-
-		verify(exploratoryDAO).updateReuploadKeyForExploratories(USER, true, UserInstanceStatus.RUNNING);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
-
-	@Test
-	public void getInstancesWithStatuses() {
-		when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), anyBoolean(), anyVararg()))
-				.thenReturn(singletonList(userInstance));
-		exploratoryService.getInstancesWithStatuses(USER, UserInstanceStatus.RUNNING, UserInstanceStatus.RUNNING);
-
-		verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, true, UserInstanceStatus.RUNNING);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
+    }
 
 	@Test
 	public void getUserInstance() {
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 
-		Optional<UserInstanceDTO> expectedInstance = Optional.of(userInstance);
-		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
-		assertEquals(expectedInstance, actualInstance);
+        Optional<UserInstanceDTO> expectedInstance = Optional.of(userInstance);
+        Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
+        assertEquals(expectedInstance, actualInstance);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void getUserInstanceWithException() {
-		doThrow(new ResourceNotFoundException("Exploratory for user not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+        doThrow(new ResourceNotFoundException("Exploratory for user not found"))
+                .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 
-		Optional<UserInstanceDTO> expectedInstance = Optional.empty();
-		Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
-		assertEquals(expectedInstance, actualInstance);
+        Optional<UserInstanceDTO> expectedInstance = Optional.empty();
+        Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
+        assertEquals(expectedInstance, actualInstance);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void testUpdateExploratoryClusterConfig() {
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
-				anyListOf(ClusterConfig.class), any(EndpointDTO.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
-		when(provisioningService.post(anyString(), anyString(), any(ExploratoryReconfigureSparkClusterActionDTO.class)
-				, any())).thenReturn(UUID);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+                anyListOf(ClusterConfig.class), any(EndpointDTO.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
+        when(provisioningService.post(anyString(), anyString(), any(ExploratoryReconfigureSparkClusterActionDTO.class),
+                any())).thenReturn(UUID);
 
-		exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME, singletonList(new ClusterConfig()));
+        exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, singletonList(new ClusterConfig()));
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(getUserInstanceDto()),
-				refEq(singletonList(new ClusterConfig())), refEq(endpointDTO()));
-		verify(requestId).put(USER, UUID);
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "exploratory/reconfigure_spark"), eq(TOKEN),
-				refEq(new ExploratoryReconfigureSparkClusterActionDTO(), "self"), eq(String.class));
-		verify(exploratoryDAO).updateExploratoryFields(refEq(new ExploratoryStatusDTO()
-				.withUser(USER)
-				.withConfig(singletonList(new ClusterConfig()))
-				.withStatus(UserInstanceStatus.RECONFIGURING.toString())
-				.withExploratoryName(EXPLORATORY_NAME), "self"));
-		verifyNoMoreInteractions(requestBuilder, requestId, exploratoryDAO, provisioningService);
-	}
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(getUserInstanceDto()),
+                refEq(singletonList(new ClusterConfig())), refEq(endpointDTO()));
+        verify(requestId).put(USER, UUID);
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "exploratory/reconfigure_spark"), eq(TOKEN),
+                refEq(new ExploratoryReconfigureSparkClusterActionDTO(), "self"), eq(String.class));
+        verify(exploratoryDAO).updateExploratoryFields(refEq(new ExploratoryStatusDTO()
+                .withUser(USER)
+                .withProject(PROJECT)
+                .withConfig(singletonList(new ClusterConfig()))
+                .withStatus(UserInstanceStatus.RECONFIGURING.toString())
+                .withExploratoryName(EXPLORATORY_NAME), "self"));
+        verifyNoMoreInteractions(requestBuilder, requestId, exploratoryDAO, provisioningService);
+    }
 
 	@Test
 	public void testUpdateExploratoryClusterConfigWhenNotRunning() {
 
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
 
-		try {
+        try {
 
-			exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME,
-					singletonList(new ClusterConfig()));
-		} catch (ResourceNotFoundException e) {
-			assertEquals("EXCEPTION", e.getMessage());
-		}
+            exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+                    singletonList(new ClusterConfig()));
+        } catch (ResourceNotFoundException e) {
+            assertEquals("EXCEPTION", e.getMessage());
+        }
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO);
-		verifyZeroInteractions(requestBuilder, requestId, provisioningService);
-
-	}
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(exploratoryDAO);
+        verifyZeroInteractions(requestBuilder, requestId, provisioningService);
+    }
 
 	@Test
 	public void testGetClusterConfig() {
+        when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+        final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
 
-		when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
-		final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
+        assertEquals(1, clusterConfig.size());
+        assertEquals("classification", clusterConfig.get(0).getClassification());
 
-		assertEquals(1, clusterConfig.size());
-		assertEquals("classification", clusterConfig.get(0).getClassification());
-
-		verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO);
-	}
+        verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(exploratoryDAO);
+    }
 
 	@Test
 	public void testGetClusterConfigWithException() {
+        when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
 
-		when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
-
-		expectedException.expect(RuntimeException.class);
-		expectedException.expectMessage("Exception");
-		exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
-	}
+        expectedException.expect(RuntimeException.class);
+        expectedException.expectMessage("Exception");
+        exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
+    }
 
 	private ClusterConfig getClusterConfig() {
 		final ClusterConfig config = new ClusterConfig();
@@ -597,21 +511,25 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
-		UserComputationalResource compResource = new UserComputationalResource();
-		compResource.setImageName("YYYY.dataengine");
-		compResource.setComputationalName("compName");
-		compResource.setStatus("stopped");
-		compResource.setComputationalId("compId");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME).withStatus("running")
-				.withResources(singletonList(compResource))
-				.withTags(Collections.emptyMap())
-				.withProject("project")
-				.withEndpoint("test")
-				.withCloudProvider(CloudProvider.AWS.toString());
-	}
+        UserComputationalResource compResource = new UserComputationalResource();
+        compResource.setImageName("YYYY.dataengine");
+        compResource.setComputationalName("compName");
+        compResource.setStatus("stopped");
+        compResource.setComputationalId("compId");
+        return new UserInstanceDTO()
+                .withUser(USER)
+                .withExploratoryName(EXPLORATORY_NAME)
+                .withStatus("running")
+                .withResources(singletonList(compResource))
+                .withTags(Collections.emptyMap())
+                .withProject(PROJECT)
+                .withEndpoint("test")
+                .withCloudProvider(CloudProvider.AWS.toString());
+    }
 
 	private StatusEnvBaseDTO getStatusEnvBaseDTOWithStatus(String status) {
 		return new ExploratoryStatusDTO()
+				.withProject(PROJECT)
 				.withUser(USER)
 				.withExploratoryName(EXPLORATORY_NAME)
 				.withStatus(status);
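
The hunks above are mechanical applications of one change: every ExploratoryDAO lookup and every ExploratoryService entry point now carries the project name between the user and the exploratory name, so each Mockito stub and verification gains a matching argument. A minimal sketch of the resulting test shape, assuming the fields and static imports this class already declares (exploratoryDAO, exploratoryService, userInstance, USER, PROJECT and EXPLORATORY_NAME are taken from the diff itself; the test name is illustrative only):

	@Test
	public void getUserInstanceCarriesProject() {
		// Stub the widened signature: (user, project, exploratoryName).
		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
				.thenReturn(userInstance);

		Optional<UserInstanceDTO> instance =
				exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
		assertEquals(Optional.of(userInstance), instance);

		// The project value must reach the DAO unchanged.
		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
		verifyNoMoreInteractions(exploratoryDAO);
	}
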
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
index 7509971..57d0284 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
@@ -56,7 +56,16 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.anyVararg;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ImageExploratoryServiceImplTest {
@@ -98,10 +107,10 @@
 
 	@Test
 	public void createImage() {
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
 
-		when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+		when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
 		doNothing().when(imageExploratoryDao).save(any(Image.class));
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
@@ -115,16 +124,16 @@
 				.thenReturn(expectedUuid);
 
 		String imageName = "someImageName", imageDescription = "someDescription";
-		String actualUuid = imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName,
-				imageDescription);
+		String actualUuid = imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME,
+				imageName, imageDescription);
 		assertNotNull(actualUuid);
 		assertEquals(expectedUuid, actualUuid);
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
 		verify(imageExploratoryDao).exist(imageName, PROJECT);
 		verify(imageExploratoryDao).save(any(Image.class));
-		verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
+		verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
 		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO());
 		verify(endpointService).get(anyString());
 		verify(provisioningService).post(endpointDTO().getUrl() + "exploratory/image", TOKEN, eiDto, String.class);
@@ -134,37 +143,37 @@
 	@Test
 	public void createImageWhenMethodFetchRunningExploratoryFieldsThrowsException() {
 		doThrow(new DlabException("Running exploratory instance for user with name not found."))
-				.when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString(), anyString());
 
 		String imageName = "someImageName", imageDescription = "someDescription";
 
 		try {
-			imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+			imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 		} catch (DlabException e) {
 			assertEquals("Running exploratory instance for user with name not found.", e.getMessage());
 		}
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 	}
 
 	@Test
 	public void createImageWhenResourceAlreadyExists() {
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(true);
 
 		expectedException.expect(ResourceAlreadyExistException.class);
 		expectedException.expectMessage("Image with name someImageName is already exist");
 
 		String imageName = "someImageName", imageDescription = "someDescription";
-		imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+		imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 	}
 
 	@Test
 	public void createImageWhenMethodNewExploratoryImageCreateThrowsException() {
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
 
-		when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+		when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
 		doNothing().when(imageExploratoryDao).save(any(Image.class));
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
@@ -174,16 +183,16 @@
 
 		String imageName = "someImageName", imageDescription = "someDescription";
 		try {
-			imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+			imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
 		} catch (DlabException e) {
 			assertEquals("Cannot create instance of resource class", e.getMessage());
 		}
 
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
 		verify(imageExploratoryDao).exist(imageName, PROJECT);
 		verify(imageExploratoryDao).save(any(Image.class));
-		verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
+		verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
 		verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO());
 		verify(endpointService).get(anyString());
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService);
@@ -194,13 +203,13 @@
 		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
 				.thenReturn(mock(UpdateResult.class));
 		doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
-		doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+		doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
 
 		String notebookIp = "someIp";
 		imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, notebookIp);
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
@@ -211,7 +220,7 @@
 				.thenReturn(mock(UpdateResult.class));
 		doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
 		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+				.when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
 
 		String notebookIp = "someIp";
 		try {
@@ -221,7 +230,7 @@
 		}
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
@@ -235,7 +244,7 @@
 		imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, null);
 
 		verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
-		verify(exploratoryDAO, never()).updateExploratoryIp(USER, null, EXPLORATORY_NAME);
+		verify(exploratoryDAO, never()).updateExploratoryIp(USER, PROJECT, null, EXPLORATORY_NAME);
 		verify(imageExploratoryDao).updateImageFields(image);
 		verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
 	}
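
The finishImageCreate tests above show the same widening on a void method: ExploratoryDAO.updateExploratoryIp now takes the project between the user and the notebook IP, so doNothing() stubs need four matchers and verifications four arguments. A minimal sketch under the same assumptions (image, imageExploratoryService, exploratoryDAO and imageExploratoryDao are the fields of this class as shown in the diff; the test name is illustrative only):

	@Test
	public void finishImageCreateUpdatesIpWithProject() {
		when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
				.thenReturn(mock(UpdateResult.class));
		doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
		// Void DAO method: doNothing() with four matchers for the new arity.
		doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());

		imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, "someIp");

		// PROJECT now sits between the user and the IP in both stub and verification.
		verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, "someIp", EXPLORATORY_NAME);
	}
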
@@ -301,6 +310,7 @@
 				.description("someDescription")
 				.status(ImageStatus.CREATING)
 				.user(USER)
+				.project(PROJECT)
 				.libraries(Collections.singletonList(getLibrary()))
 				.computationalLibraries(Collections.emptyMap())
 				.dockerImage("someImageName")
@@ -313,8 +323,11 @@
 	}
 
 	private UserInstanceDTO getUserInstanceDto() {
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withExploratoryId("explId").withProject(PROJECT);
+		return new UserInstanceDTO()
+				.withUser(USER)
+				.withExploratoryName(EXPLORATORY_NAME)
+				.withExploratoryId("explId")
+				.withProject(PROJECT);
 	}
 
 	private UserInfo getUserInfo() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
index bc3ecbf..3677929 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
@@ -56,38 +56,46 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class LibraryServiceImplTest {
 
-	private static final String LIB_NAME = "name";
-	private static final String LIB_GROUP = "group";
-	private static final String LIB_VERSION = "version";
-	private static final String UUID = "id";
-	private final String USER = "test";
-	private final String EXPLORATORY_NAME = "explName";
-	private final String COMPUTATIONAL_NAME = "compName";
+    private static final String LIB_NAME = "name";
+    private static final String LIB_GROUP = "group";
+    private static final String LIB_VERSION = "version";
+    private static final String UUID = "id";
+    private final String USER = "test";
+    private final String EXPLORATORY_NAME = "explName";
+    private final String PROJECT = "projectName";
+    private final String COMPUTATIONAL_NAME = "compName";
 
-	private LibInstallDTO liDto;
-	private List<LibInstallDTO> libs;
-	private LibInstallFormDTO libInstallFormDTO;
-	private LibraryInstallDTO libraryInstallDto;
+    private LibInstallDTO liDto;
+    private List<LibInstallDTO> libs;
+    private LibInstallFormDTO libInstallFormDTO;
+    private LibraryInstallDTO libraryInstallDto;
 
-	@Mock
-	private ExploratoryDAO exploratoryDAO;
-	@Mock
-	private ExploratoryLibDAO libraryDAO;
-	@Mock
-	private RequestBuilder requestBuilder;
-	@Mock
-	private RequestId requestId;
-	@Mock
-	private RESTService provisioningService;
-	@Mock
-	private EndpointService endpointService;
+    @Mock
+    private ExploratoryDAO exploratoryDAO;
+    @Mock
+    private ExploratoryLibDAO libraryDAO;
+    @Mock
+    private RequestBuilder requestBuilder;
+    @Mock
+    private RequestId requestId;
+    @Mock
+    private RESTService provisioningService;
+    @Mock
+    private EndpointService endpointService;
 
-	@Rule
+    @Rule
 	public ExpectedException expectedException = ExpectedException.none();
 
 	@InjectMocks
@@ -100,243 +108,245 @@
 
 	@Test
 	public void testGetLibs() {
-		Document document = new Document();
-		when(libraryDAO.findExploratoryLibraries(anyString(), anyString())).thenReturn(document);
+        Document document = new Document();
+        when(libraryDAO.findExploratoryLibraries(anyString(), anyString(), anyString())).thenReturn(document);
 
-		List<Document> expectedList = new ArrayList<>();
-		List<Document> actualList = libraryService.getLibs(USER, EXPLORATORY_NAME, "");
-		assertNotNull(actualList);
-		assertEquals(expectedList, actualList);
+        List<Document> expectedList = new ArrayList<>();
+        List<Document> actualList = libraryService.getLibs(USER, PROJECT, EXPLORATORY_NAME, "");
+        assertNotNull(actualList);
+        assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findExploratoryLibraries(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO);
-	}
+        verify(libraryDAO).findExploratoryLibraries(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO);
+    }
 
 	@Test
 	public void getLibInfo() {
-		Document document = new Document();
-		when(libraryDAO.findAllLibraries(anyString(), anyString())).thenReturn(document);
+        Document document = new Document();
+        when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString())).thenReturn(document);
 
-		List<LibInfoRecord> expectedList = new ArrayList<>();
-		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
-		assertNotNull(actualList);
-		assertEquals(expectedList, actualList);
+        List<LibInfoRecord> expectedList = new ArrayList<>();
+        List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(actualList);
+        assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO);
-	}
+        verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO);
+    }
 
 	@Test
 	public void getLibInfoWhenListsOfExploratoryAndComputationalLibsAreNotEmpty() {
-		when(libraryDAO.findAllLibraries(anyString(), anyString()))
-				.thenReturn(getDocumentWithExploratoryAndComputationalLibs());
+        when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString()))
+                .thenReturn(getDocumentWithExploratoryAndComputationalLibs());
 
-		List<LibInfoRecord> expectedList = getLibInfoRecordList();
-		List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
-		assertNotNull(actualList);
-		assertEquals(expectedList, actualList);
+        List<LibInfoRecord> expectedList = getLibInfoRecordList();
+        List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
+        assertNotNull(actualList);
+        assertEquals(expectedList, actualList);
 
-		verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO);
-	}
+        verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO);
+    }
 
 	@Test
 	public void installComputationalLibsWithoutOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class))).thenReturn(libraryInstallDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class))).thenReturn(libraryInstallDTO);
 
 
-		final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				getLibs(null));
+        final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                COMPUTATIONAL_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
-				refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"), eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
-				refEq(libsToInstall.get(0)), eq(false));
-		verify(requestId).put(user.getName(), UUID);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
+                refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"), eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+                eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(false));
+        verify(requestId).put(user.getName(), UUID);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installComputationalLibsWhenComputationalNotFound() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        libraryInstallDTO.setProject(PROJECT);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
-				.thenReturn(libraryInstallDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
 
 
-		expectedException.expect(DlabException.class);
-		expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
+        expectedException.expect(DlabException.class);
+        expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
 
-		libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME + "X",
-				getLibs(null));
-	}
+        libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                COMPUTATIONAL_NAME + "X", getLibs(null));
+    }
 
 	@Test
 	public void installComputationalLibsWithOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setProject(PROJECT);
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
-				.thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
 
-		final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				getLibs(null));
+        final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                COMPUTATIONAL_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		libsToInstall.get(0).setOverride(true);
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
-				refEq(libsToInstall.get(0)), eq(true));
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
-				refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
-				eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(requestId).put(user.getName(), UUID);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+        libsToInstall.get(0).setOverride(true);
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+                eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(true));
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
+                refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
+                eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(requestId).put(user.getName(), UUID);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
 
-	}
+    }
 
 
 	@Test
 	public void installComputationalLibsWhenLibraryIsAlreadyInstalling() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
-				any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
-				.thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+                any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+                .thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
 
-		try {
-			libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-					getLibs(null));
-		} catch (DlabException e) {
-			assertEquals("Library name is already installing", e.getMessage());
-		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        try {
+            libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+                    COMPUTATIONAL_NAME, getLibs(null));
+        } catch (DlabException e) {
+            assertEquals("Library name is already installing", e.getMessage());
+        }
+        verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWithoutOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
 
 
-		final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+        final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
-		verify(requestId).put(user.getName(), UUID);
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
+        verify(requestId).put(user.getName(), UUID);
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWithOverride() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
 
-		final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+        final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
 
-		assertEquals(UUID, uuid);
+        assertEquals(UUID, uuid);
 
-		libsToInstall.get(0).setOverride(true);
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
-		verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
-				refEq(libraryInstallDTO), eq(String.class));
-		verify(requestId).put(USER, uuid);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
-	}
+        libsToInstall.get(0).setOverride(true);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
+        verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+                refEq(libraryInstallDTO), eq(String.class));
+        verify(requestId).put(USER, uuid);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+    }
 
 	@Test
 	public void installExploratoryLibsWhenLibIsAlreadyInstalling() {
-		final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
-		final List<LibInstallDTO> libsToInstall = getLibs("installing");
-		libraryInstallDTO.setLibs(libsToInstall);
-		final UserInfo user = getUser();
+        final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+        final List<LibInstallDTO> libsToInstall = getLibs("installing");
+        libraryInstallDTO.setLibs(libsToInstall);
+        final UserInfo user = getUser();
 
-		when(endpointService.get(anyString())).thenReturn(endpointDTO());
-		when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
-		when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
-		when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
-				anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
-		when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+        when(endpointService.get(anyString())).thenReturn(endpointDTO());
+        when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+        when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+        when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+                anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+        when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
 
-		try {
-			libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
-		} catch (DlabException e) {
-			assertEquals("Library name is already installing", e.getMessage());
-		}
+        try {
+            libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
+        } catch (DlabException e) {
+            assertEquals("Library name is already installing", e.getMessage());
+        }
 
-		verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
-		verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+        verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+        verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+        verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
 
-	}
+    }
 
 	private Library getLibrary(LibStatus status) {
 		return new Library(LIB_GROUP, LIB_NAME, "1", status, "");
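
Note on the hunks above: the library-service tests now thread a project name through every call, so a library is keyed by (user, project, exploratory) rather than (user, exploratory). The DAO declarations themselves are not part of this diff; the Java sketch below only spells out the signatures that the stubs and verifications imply, with parameter names assumed:

    // Inferred from verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
    // return type taken from the stub .thenReturn(getLibrary(...))
    Library getLibrary(String user, String project, String exploratoryName, String group, String name);

    // Inferred from verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
    // return type taken from the stub .thenReturn(getUserInstanceDto())
    UserInstanceDTO fetchRunningExploratoryFields(String user, String project, String exploratoryName);

Because the arity changed, stubs written against the old signatures no longer compile, which is why each when(...) above gains exactly one extra anyString() in the project position.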
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
index 9b5e887..1aefbbb 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
@@ -25,13 +25,10 @@
 import com.epam.dlab.backendapi.domain.RequestId;
 import com.epam.dlab.backendapi.service.ExploratoryService;
 import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.UserInstanceDTO;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyCallbackDTO;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
 import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.model.ResourceData;
 import com.epam.dlab.model.ResourceType;
 import com.epam.dlab.rest.client.RESTService;
@@ -45,14 +42,16 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
 import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ReuploadKeyServiceImplTest {
@@ -111,16 +110,16 @@
 	@Test
 	public void updateResourceDataForExploratoryWhenStatusCompleted() {
 		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
 				any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
-		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyBoolean());
+		doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyString(), anyBoolean());
 
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
-		verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
+		verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, null, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -128,14 +127,14 @@
 	@Test
 	public void updateResourceDataForExploratoryWhenStatusFailed() {
 		ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
-		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+		when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
 				any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
 
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
+		verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -144,16 +143,16 @@
 	public void updateResourceDataForClusterWhenStatusCompleted() {
 		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
 		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				any(UserInstanceStatus.class));
+				anyString(), any(UserInstanceStatus.class));
 		doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResource(anyString(), anyString(),
-				anyString(), anyBoolean());
+				anyString(), anyString(), anyBoolean());
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
-		verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, EXPLORATORY_NAME, "compName",
-				false);
+		verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
+		verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, null, EXPLORATORY_NAME,
+				"compName", false);
 		verifyNoMoreInteractions(computationalDAO);
 		verifyZeroInteractions(exploratoryDAO);
 	}
@@ -162,12 +161,12 @@
 	public void updateResourceDataForClusterWhenStatusFailed() {
 		ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
 		doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
-				any(UserInstanceStatus.class));
+				anyString(), any(UserInstanceStatus.class));
 		ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
 
 		reuploadKeyService.updateResourceData(dto);
 
-		verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
+		verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
 		verifyNoMoreInteractions(computationalDAO);
 		verifyZeroInteractions(exploratoryDAO);
 	}
@@ -176,10 +175,6 @@
 		return new UserInfo(USER, TOKEN);
 	}
 
-	private UserInstanceDTO getUserInstance() {
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME);
-	}
-
 	private ReuploadKeyStatusDTO getReuploadKeyStatusDTO(ResourceData resource, ReuploadKeyStatus status) {
 		return new ReuploadKeyStatusDTO().withReuploadKeyCallbackDto(
 				new ReuploadKeyCallbackDTO().withResource(resource)).withReuploadKeyStatus(status).withUser(USER);
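
In this file the verifications pass an explicit null in the new project slot: the ReuploadKeyStatusDTO built in getReuploadKeyStatusDTO carries a user and a resource but no project, so updateResourceData has nothing better to forward. A minimal sketch of the COMPLETED exploratory path under that assumption (the service body is not shown in this diff, and the accessor names are illustrative only):

    // Sketch only: plausible EXPLORATORY branch of ReuploadKeyServiceImpl.updateResourceData
    if (resource.getResourceType() == ResourceType.EXPLORATORY) {
        // project is unknown at this point, hence the literal null seen in the verifications
        exploratoryDAO.updateStatusForExploratory(dto.getUser(), null, resource.getExploratoryName(), RUNNING);
        exploratoryDAO.updateReuploadKeyForExploratory(dto.getUser(), null, resource.getExploratoryName(), false);
    }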
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
index 8a7d8ec..c025651 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
@@ -43,16 +43,40 @@
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
 import java.time.temporal.ChronoUnit;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
 import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.mockito.Matchers.anyVararg;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
 public class SchedulerJobServiceImplTest {
@@ -92,84 +116,83 @@
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratory() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
 				.thenReturn(Optional.of(schedulerJobDTO));
 
 		SchedulerJobDTO actualSchedulerJobDto =
-				schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+				schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		assertNotNull(actualSchedulerJobDto);
 		assertEquals(schedulerJobDTO, actualSchedulerJobDto);
 
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchSchedulerJobForUserAndExploratoryWhenNotebookNotExist() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString())).thenReturn(Optional.empty());
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString())).thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchEmptySchedulerJobForUserAndExploratory() {
-		when(exploratoryDAO.isExploratoryExist(anyString(), anyString())).thenReturn(true);
-		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
 				.thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+			schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
-		verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
+		verifyNoMoreInteractions(schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchSchedulerJobForComputationalResource() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(Optional.of(schedulerJobDTO));
 
 		SchedulerJobDTO actualSchedulerJobDto = schedulerJobService
-				.fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+				.fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		assertNotNull(actualSchedulerJobDto);
 		assertEquals(schedulerJobDTO, actualSchedulerJobDto);
 
-		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void fetchEmptySchedulerJobForComputationalResource() {
-		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+		when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(Optional.empty());
 		try {
-			schedulerJobService.fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+			schedulerJobService.fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Scheduler job data not found for user test with exploratory explName with " +
 					"computational resource compName", e.getMessage());
 		}
-		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
 	}
 
 	@Test
 	public void updateSchedulerDataForUserAndExploratory() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -177,13 +200,13 @@
 	@Test
 	public void updateSchedulerDataForUserAndExploratoryWhenMethodFetchExploratoryFieldsThrowsException() {
 		doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
-				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+				.when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
 		try {
-			schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+			schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Exploratory for user with name not found", e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -191,14 +214,14 @@
 	@Test
 	public void updateSchedulerDataForUserAndExploratoryWithInapproprietaryStatus() {
 		userInstance.withStatus("terminated");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		try {
-			schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+			schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 		} catch (ResourceInappropriateStateException e) {
 			assertEquals("Can not create/update scheduler for user instance with status: terminated",
 					e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -208,21 +231,21 @@
 		schedulerJobDTO.setBeginDate(null);
 		schedulerJobDTO.setTimeZoneOffset(null);
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		assertNull(schedulerJobDTO.getBeginDate());
 		assertNull(schedulerJobDTO.getTimeZoneOffset());
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
 		assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
 		assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
 		verifyNoMoreInteractions(exploratoryDAO);
 		verifyZeroInteractions(computationalDAO);
 	}
@@ -232,25 +255,24 @@
 	public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParam() {
 		userInstance.withStatus("running");
 		schedulerJobDTO.setSyncStartRequired(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
-				anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
+		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+				any(List.class), anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
-				singletonList(DataEngineType.SPARK_STANDALONE),
-				EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+				singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
 		schedulerJobDTO.setEndTime(null);
 		schedulerJobDTO.setStopDaysRepeat(Collections.emptyList());
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
-				COMPUTATIONAL_NAME, schedulerJobDTO);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
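
The sync-start hunk above also pins down, via anyVararg(), the shape of the project-aware DAO query. Treating the stub and its matching verification as the spec, the implied declaration is roughly the following (a hedged reconstruction, not the actual ComputationalDAO source):

    // Inferred from the stub/verify pair above; the stubbed return value is a
    // singleton list holding the COMPUTATIONAL_NAME string
    List<String> getComputationalResourcesWhereStatusIn(String user, String project,
            List<DataEngineType> dataEngineTypes, String exploratoryName,
            UserInstanceStatus... statuses);

anyVararg() is the Mockito 1.x matcher (from org.mockito.Matchers, still explicitly imported above) for the trailing UserInstanceStatus... parameter; the four concrete statuses (STARTING, RUNNING, STOPPING, STOPPED) appear only in the verification.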
 
@@ -259,19 +281,18 @@
 	public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParamButAbsenceClusters() {
 		userInstance.withStatus("running");
 		schedulerJobDTO.setSyncStartRequired(true);
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
 				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
-		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
-				anyString(), anyVararg())).thenReturn(Collections.emptyList());
+		when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+				any(List.class), anyString(), anyVararg())).thenReturn(Collections.emptyList());
 
-		schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+		schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
-		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
-				singletonList(DataEngineType.SPARK_STANDALONE),
-				EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+		verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+				singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -279,30 +300,30 @@
 	@Test
 	public void updateSchedulerDataForComputationalResource() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
-
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
 				COMPUTATIONAL_NAME, schedulerJobDTO);
+
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
 	@Test
 	public void updateSchedulerDataForComputationalResourceWhenSchedulerIsNull() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		final SchedulerJobDTO schedulerJobDTO = getSchedulerJobDTO(LocalDate.now(), LocalDate.now().plusDays(1),
 				Arrays.asList(DayOfWeek.values()), Arrays.asList(DayOfWeek.values()), false,
@@ -310,12 +331,12 @@
 				LocalTime.now().truncatedTo(ChronoUnit.MINUTES));
 		schedulerJobDTO.setStartDaysRepeat(null);
 		schedulerJobDTO.setStopDaysRepeat(null);
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+				COMPUTATIONAL_NAME, schedulerJobDTO);
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(EXPLORATORY_NAME),
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME), refEq(schedulerJobDTO));
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
@@ -323,18 +344,17 @@
 	@Test
 	public void updateSchedulerDataForComputationalResourceWhenMethodFetchComputationalFieldsThrowsException() {
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
 		doThrow(new ResourceNotFoundException("Computational resource for user with name not found"))
-				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+				.when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
 		try {
-			schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
-					COMPUTATIONAL_NAME, schedulerJobDTO);
+			schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		} catch (ResourceNotFoundException e) {
 			assertEquals("Computational resource for user with name not found", e.getMessage());
 		}
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -342,18 +362,17 @@
 	public void updateSchedulerDataForComputationalResourceWithInapproprietaryClusterStatus() {
 		userInstance.setStatus("running");
 		userInstance.getResources().get(0).setStatus("terminated");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		try {
-			schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
-					COMPUTATIONAL_NAME, schedulerJobDTO);
+			schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		} catch (ResourceInappropriateStateException e) {
 			assertEquals("Can not create/update scheduler for user instance with status: terminated",
 					e.getMessage());
 		}
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -362,25 +381,25 @@
 		schedulerJobDTO.setBeginDate(null);
 		schedulerJobDTO.setTimeZoneOffset(null);
 		userInstance.withStatus("running");
-		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
-		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+		when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+		when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
 				.thenReturn(userInstance.getResources().get(0));
 		when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
-				any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+				anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
 
 		assertNull(schedulerJobDTO.getBeginDate());
 		assertNull(schedulerJobDTO.getTimeZoneOffset());
 
-		schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
-				schedulerJobDTO);
+		schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+				COMPUTATIONAL_NAME, schedulerJobDTO);
 
 		assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
 		assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
 
-		verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
-		verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
-		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
-				COMPUTATIONAL_NAME, schedulerJobDTO);
+		verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+		verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+		verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+				EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
 		verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
 	}
 
@@ -499,7 +518,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO)
 				.getComputationalSchedulerDataWithOneOfStatus(RUNNING, DataEngineType.SPARK_STANDALONE, RUNNING);
-		verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
+		verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
 	}
@@ -597,7 +616,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerWithStatusAndClusterLastActivityLessThan(eq(RUNNING),
 				any(Date.class));
-		verify(exploratoryService).stop(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+		verify(exploratoryService).stop(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService);
 	}
 
@@ -711,7 +730,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.SPARK_STANDALONE, true)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -719,7 +738,7 @@
 		verify(securityService, times(2)).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verify(computationalService).startSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
 				eq(COMPUTATIONAL_NAME), eq(PROJECT));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalService,
@@ -738,7 +757,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.CLOUD_SERVICE, true)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -746,7 +765,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
 		verifyZeroInteractions(computationalService);
 	}
@@ -763,7 +782,7 @@
 				)));
 		when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
 		when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
-				any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+				anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
 				DataEngineType.SPARK_STANDALONE, false)));
 
 		schedulerJobService.startExploratoryByScheduler();
@@ -771,7 +790,7 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
 		verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
-		verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+		verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
 		verifyZeroInteractions(computationalService);
 	}
@@ -862,8 +881,8 @@
 		verify(securityService).getServiceAccountInfo(USER);
 		verify(schedulerJobDAO)
 				.getComputationalSchedulerDataWithOneOfStatus(RUNNING, STOPPED, RUNNING);
-		verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
-				eq(COMPUTATIONAL_NAME));
+		verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(PROJECT),
+				eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
 	}
 
@@ -955,7 +974,7 @@
 
 		verify(securityService).getUserInfoOffline(USER);
 		verify(schedulerJobDAO).getExploratorySchedulerDataWithOneOfStatus(RUNNING, STOPPED);
-		verify(exploratoryService).terminate(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+		verify(exploratoryService).terminate(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
 		verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService, exploratoryService);
 	}
 
@@ -1085,8 +1104,11 @@
 	private UserInstanceDTO getUserInstanceDTO() {
 		UserComputationalResource computationalResource = new UserComputationalResource();
 		computationalResource.setStatus("running");
-		return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
-				.withResources(singletonList(computationalResource));
+		return new UserInstanceDTO()
+				.withUser(USER)
+				.withExploratoryName(EXPLORATORY_NAME)
+				.withResources(singletonList(computationalResource))
+				.withProject(PROJECT);
 	}
 
 	private AwsComputationalResource getComputationalResource(DataEngineType dataEngineType,
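
Besides adding the project argument throughout, this file replaces its wildcard imports (java.time.*, java.util.*, UserInstanceStatus.*, Assert.*, Mockito.*) with explicit ones. That is mechanical but worthwhile for static imports in particular: spelled-out imports show at a glance which matchers and verifiers a test actually uses, and they avoid collisions when two wildcard static imports export the same member name. The convention, as the hunks above apply it, is simply:

    import static org.mockito.Mockito.verify;   // one explicit line per helper
    import static org.mockito.Mockito.when;
    import static org.junit.Assert.assertEquals;

The getUserInstanceDTO() helper at the end is likewise extended with .withProject(PROJECT), so the instances flowing through the scheduler tests carry the project that the new project-scoped assertions expect.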
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
index 305e852..4fec7c6 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/UserGroupServiceImplTest.java
@@ -23,10 +23,13 @@
 import com.epam.dlab.backendapi.dao.UserGroupDao;
 import com.epam.dlab.backendapi.dao.UserRoleDao;
 import com.epam.dlab.backendapi.domain.ProjectDTO;
+import com.epam.dlab.backendapi.resources.TestBase;
 import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
 import com.epam.dlab.dto.UserInstanceStatus;
 import com.epam.dlab.exceptions.DlabException;
 import com.epam.dlab.exceptions.ResourceNotFoundException;
+import io.dropwizard.auth.AuthenticationException;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -36,39 +39,50 @@
 import org.mockito.runners.MockitoJUnitRunner;
 
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
-public class UserGroupServiceImplTest {
+public class UserGroupServiceImplTest extends TestBase {
 
-	private static final String ROLE_ID = "Role id";
-	private static final String USER = "test";
-	private static final String GROUP = "admin";
-	@Mock
-	private UserRoleDao userRoleDao;
-	@Mock
-	private UserGroupDao userGroupDao;
-	@Mock
-	private ProjectDAO projectDAO;
-	@InjectMocks
-	private UserGroupServiceImpl userGroupService;
+    private static final String ROLE_ID = "Role id";
+    private static final String USER = "test";
+    private static final String GROUP = "admin";
+    @Mock
+    private UserRoleDao userRoleDao;
+    @Mock
+    private UserGroupDao userGroupDao;
+    @Mock
+    private ProjectDAO projectDAO;
+    @InjectMocks
+    private UserGroupServiceImpl userGroupService;
 
-	@Rule
-	public ExpectedException expectedException = ExpectedException.none();
+    @Rule
+    public ExpectedException expectedException = ExpectedException.none();
 
-	@Test
-	public void createGroup() {
-		when(userRoleDao.addGroupToRole(anySet(), anySet())).thenReturn(true);
+    @Before
+    public void setup() throws AuthenticationException {
+        authSetup();
+    }
 
-		userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+    @Test
+    public void createGroup() {
+        when(userRoleDao.addGroupToRole(anySet(), anySet())).thenReturn(true);
 
-		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verify(userGroupDao).addUsers(GROUP, Collections.singleton(USER));
-	}
+        userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+
+        verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
+        verify(userGroupDao).addUsers(GROUP, Collections.singleton(USER));
+    }
 
 	@Test
 	public void createGroupWithNoUsers() {
@@ -77,7 +91,7 @@
 		userGroupService.createGroup(GROUP, Collections.singleton(ROLE_ID), Collections.emptySet());
 
 		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verify(userGroupDao, never()).addUsers(anyString(), anySet());
+		verify(userGroupDao).addUsers(anyString(), anySet());
 	}
 
 	@Test
@@ -89,70 +103,6 @@
 	}
 
 	@Test
-	public void getAggregatedRoles() {
-		when(userRoleDao.aggregateRolesByGroup()).thenReturn(Collections.singletonList(getUserGroup()));
-
-		final List<UserGroupDto> aggregatedRolesByGroup = userGroupService.getAggregatedRolesByGroup();
-
-		assertEquals(1, aggregatedRolesByGroup.size());
-		assertEquals(GROUP, aggregatedRolesByGroup.get(0).getGroup());
-		assertTrue(aggregatedRolesByGroup.get(0).getRoles().isEmpty());
-
-		verify(userRoleDao).aggregateRolesByGroup();
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void addUserToGroup() {
-		userGroupService.addUsersToGroup(GROUP, Collections.singleton(USER));
-
-		verify(userGroupDao).addUsers(eq(GROUP), refEq(Collections.singleton(USER)));
-		verifyNoMoreInteractions(userRoleDao, userGroupDao);
-	}
-
-	@Test
-	public void addRolesToGroup() {
-		when(userRoleDao.addGroupToRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(true);
-
-		userGroupService.updateRolesForGroup(GROUP, Collections.singleton(ROLE_ID));
-
-		verify(userRoleDao).addGroupToRole(refEq(Collections.singleton(GROUP)), refEq(Collections.singleton(ROLE_ID)));
-		verify(userRoleDao).removeGroupWhenRoleNotIn(GROUP, Collections.singleton(ROLE_ID));
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void removeUserFromGroup() {
-
-		userGroupService.removeUserFromGroup(GROUP, USER);
-
-		verify(userGroupDao).removeUser(GROUP, USER);
-		verifyNoMoreInteractions(userGroupDao);
-	}
-
-	@Test
-	public void removeGroupFromRole() {
-
-		when(userRoleDao.removeGroupFromRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(true);
-
-		userGroupService.removeGroupFromRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-
-		verify(userRoleDao).removeGroupFromRole(refEq(Collections.singleton(GROUP)),
-				refEq(Collections.singleton(ROLE_ID)));
-		verifyNoMoreInteractions(userRoleDao);
-	}
-
-	@Test
-	public void removeGroupFromRoleWithException() {
-		when(userRoleDao.removeGroupFromRole(anySetOf(String.class), anySetOf(String.class))).thenReturn(false);
-
-		expectedException.expectMessage("Any of role : [" + ROLE_ID + "] were not found");
-		expectedException.expect(ResourceNotFoundException.class);
-
-		userGroupService.removeGroupFromRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-	}
-
-	@Test
 	public void removeGroup() {
 
 		when(userRoleDao.removeGroup(anyString())).thenReturn(true);
@@ -220,17 +170,13 @@
 		userGroupService.removeGroup(GROUP);
 	}
 
-	@Test
-	public void updateGroup() {
-		userGroupService.updateGroup(GROUP, Collections.singleton(ROLE_ID), Collections.singleton(USER));
+    private UserGroupDto getUserGroup() {
+        return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
+    }
 
-		verify(userGroupDao).updateUsers(GROUP, Collections.singleton(USER));
-		verify(userRoleDao).removeGroupWhenRoleNotIn(GROUP, Collections.singleton(ROLE_ID));
-		verify(userRoleDao).addGroupToRole(Collections.singleton(GROUP), Collections.singleton(ROLE_ID));
-		verifyNoMoreInteractions(userRoleDao, userGroupDao);
-	}
-
-	private UserGroupDto getUserGroup() {
-		return new UserGroupDto(GROUP, Collections.emptyList(), Collections.emptySet());
-	}
+    private List<ProjectDTO> getProjects() {
+        return Collections.singletonList(ProjectDTO.builder()
+                .groups(new HashSet<>(Collections.singletonList(GROUP)))
+                .build());
+    }
 }
\ No newline at end of file
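
Two things are worth noting in this last file. First, UserGroupServiceImplTest now extends TestBase and runs authSetup() in an @Before method, so an authenticated context exists before each test; TestBase itself is not part of this diff, so the sketch below of its contract is an assumption, included only to make the setup() call readable:

    // Hypothetical shape of com.epam.dlab.backendapi.resources.TestBase (assumed, not from this diff)
    public abstract class TestBase {
        protected void authSetup() throws AuthenticationException {
            // e.g. stub the authenticator so a fixed test user is resolved for each test
        }
    }

Second, createGroupWithNoUsers now expects verify(userGroupDao).addUsers(anyString(), anySet()) instead of verify(userGroupDao, never()).addUsers(...), i.e. creating a group writes the (possibly empty) user set unconditionally. The new getProjects() helper builds a single ProjectDTO whose groups contain GROUP; it has no caller within this diff.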