Merge branch 'odahu-integration' into develop
diff --git a/README.md b/README.md
index eff3738..6722e59 100644
--- a/README.md
+++ b/README.md
@@ -478,7 +478,6 @@
| action | In case of SSN node creation, this parameter should be set to “create”|
| workspace\_path | Path to DLab sources root
| conf\_image\_enabled | Enable or Disable creating image at first time |
-| conf\_shared\_image\_enabled | Enable or Disable shared images |
**Note:** If the following parameters are not specified, they will be created automatically:
- aws\_vpc\_id
@@ -533,7 +532,6 @@
| azure\_ad\_group\_id | ID of group in Active directory whose members have full access to shared folder in Azure Data Lake Store |
| action | In case of SSN node creation, this parameter should be set to “create” |
| conf\_image\_enabled | Enable or Disable creating image at first time |
-| conf\_shared\_image\_enabled | Enable or Disable shared images |
**Note:** If the following parameters are not specified, they will be created automatically:
@@ -601,7 +599,6 @@
| gcp\_project\_id | ID of GCP project |
| action | In case of SSN node creation, this parameter should be set to “create” |
| conf\_image\_enabled | Enable or Disable creating image at first time |
-| conf\_shared\_image\_enabled | Enable or Disable shared images |
| billing\_dataset\_name | Name of GCP dataset (BigQuery service) |
**Note:** If you gonna use Dataproc cluster, be aware that Dataproc has limited availability in GCP regions. [Cloud Dataproc availability by Region in GCP](https://cloud.google.com/about/locations/)
diff --git a/infrastructure-provisioning/scripts/deploy_dlab.py b/infrastructure-provisioning/scripts/deploy_dlab.py
index 73eb9d7..cd27a5b 100644
--- a/infrastructure-provisioning/scripts/deploy_dlab.py
+++ b/infrastructure-provisioning/scripts/deploy_dlab.py
@@ -40,7 +40,6 @@
parser.add_argument('--conf_additional_tags', type=str, default='', help='Additional tags in format '
'"Key1:Value1;Key2:Value2"')
parser.add_argument('--conf_image_enabled', type=str, default='', help='Enable or Disable creating image at first time')
-parser.add_argument('--conf_shared_image_enabled', type=str, default='', help='Enable or Disable shared images')
parser.add_argument('--aws_user_predefined_s3_policies', type=str, default='', help='Predefined policies for users '
'instances')
parser.add_argument('--aws_access_key', type=str, default='', help='AWS Access Key ID')
diff --git a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
index 2c283f5..3864229 100644
--- a/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
+++ b/infrastructure-provisioning/scripts/deploy_repository/deploy_repository.py
@@ -1322,7 +1322,7 @@
ec2_client = boto3.client('ec2', region_name=args.region)
efs_client = boto3.client('efs', region_name=args.region)
route53_client = boto3.client('route53')
- tag_name = args.service_base_name + '-Tag'
+ tag_name = args.service_base_name + '-tag'
pre_defined_vpc = True
pre_defined_subnet = True
pre_defined_sg = True
diff --git a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
index c37da53..741ca18 100644
--- a/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
+++ b/infrastructure-provisioning/src/deeplearning/scripts/configure_deep_learning_node.py
@@ -41,7 +41,7 @@
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -162,7 +162,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# INSTALL OPTIONAL PACKAGES
print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/edge/fabfile.py b/infrastructure-provisioning/src/edge/fabfile.py
index edbed71..66a656b 100644
--- a/infrastructure-provisioning/src/edge/fabfile.py
+++ b/infrastructure-provisioning/src/edge/fabfile.py
@@ -45,44 +45,6 @@
sys.exit(1)
-#def run():
-# local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-# os.environ['request_id'])
-# local_log_filepath = "/logs/edge/" + local_log_filename
-# logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
-# level=logging.DEBUG,
-# filename=local_log_filepath)
-#
-# try:
-# local("~/scripts/{}.py".format('edge_prepare'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed preparing Edge node.", str(err))
-# sys.exit(1)
-#
-# try:
-# local("~/scripts/{}.py".format('edge_configure'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed configuring Edge node.", str(err))
-# sys.exit(1)
-
-
-# Main function for terminating EDGE node and exploratory environment if exists
-#def terminate():
-# local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
-# local_log_filepath = "/logs/edge/" + local_log_filename
-# logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
-# level=logging.DEBUG,
-# filename=local_log_filepath)
-# try:
-# local("~/scripts/{}.py".format('edge_terminate'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed terminating Edge node.", str(err))
-# sys.exit(1)
-
-
# Main function for stopping EDGE node
def stop():
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
@@ -111,41 +73,3 @@
traceback.print_exc()
append_result("Failed starting Edge node.", str(err))
sys.exit(1)
-
-
-#def recreate():
-# local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
-# os.environ['request_id'])
-# local_log_filepath = "/logs/edge/" + local_log_filename
-# logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
-# level=logging.DEBUG,
-# filename=local_log_filepath)
-#
-# try:
-# local("~/scripts/{}.py".format('edge_prepare'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed preparing Edge node.", str(err))
-# sys.exit(1)
-#
-# try:
-# local("~/scripts/{}.py".format('edge_configure'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed configuring Edge node.", str(err))
-# sys.exit(1)
-
-#def reupload_key():
-# local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
-# os.environ['request_id'])
-# local_log_filepath = "/logs/edge/" + local_log_filename
-# logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
-# level=logging.DEBUG,
-# filename=local_log_filepath)
-#
-# try:
-# local("~/scripts/{}.py".format('reupload_ssh_key'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed to reupload key on Edge node.", str(err))
-# sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/conf/dlab.ini b/infrastructure-provisioning/src/general/conf/dlab.ini
index c542259..e3db900 100644
--- a/infrastructure-provisioning/src/general/conf/dlab.ini
+++ b/infrastructure-provisioning/src/general/conf/dlab.ini
@@ -47,7 +47,7 @@
### Enable or Disable creating image at first time
image_enabled = true
###Enable or Disable shared images
-shared_image_enabled = true
+#shared_image_enabled = true
### CIDR of VPC
vpc_cidr = '172.31.0.0/16'
### CIDR of second VPC
@@ -292,6 +292,8 @@
jupyterlab_image = odahu\\/base-notebook:1.1.0-rc8
### Superset version
superset_version = 0.35.1
+### GCS-connector version
+gcs_connector_version = 2.0.1
#--- [emr] section contains all parameters that are using for emr provisioning ---#
[emr]
diff --git a/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json
index 8a376bf..3f202f7 100644
--- a/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json
+++ b/infrastructure-provisioning/src/general/files/gcp/jupyterlab_description.json
@@ -1,4 +1,4 @@
-d{
+{
"exploratory_environment_shapes" :
{
"For testing" : [
diff --git a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
index cb0a7ad..7be5328 100644
--- a/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/aws/actions_lib.py
@@ -306,10 +306,10 @@
client = boto3.client('ec2')
try:
route_tables = client.describe_route_tables(
- Filters=[{'Name': 'tag:{}-Tag'.format(service_base_name), 'Values': ['{}'.format(
+ Filters=[{'Name': 'tag:{}-tag'.format(service_base_name), 'Values': ['{}'.format(
service_base_name)]}]).get('RouteTables')
route_tables2 = client.describe_route_tables(Filters=[
- {'Name': 'tag:{}-secondary-Tag'.format(service_base_name), 'Values': ['{}'.format(
+ {'Name': 'tag:{}-secondary-tag'.format(service_base_name), 'Values': ['{}'.format(
service_base_name)]}]).get('RouteTables')
for table in route_tables:
routes = table.get('Routes')
@@ -344,7 +344,7 @@
try:
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
- tag = {"Key": service_base_name + '-Tag', "Value": service_base_name}
+ tag = {"Key": service_base_name + '-tag', "Value": service_base_name}
tag_name = {"Key": 'Name', "Value": "{0}-peering-connection".format(service_base_name)}
peering = ec2.create_vpc_peering_connection(PeerVpcId=vpc_id, VpcId=vpc2_id)
client.accept_vpc_peering_connection(VpcPeeringConnectionId=peering.id)
@@ -510,7 +510,7 @@
cluster = client.list_instances(ClusterId=cluster_id)
instances = cluster['Instances']
for instance in instances:
- instance_tag = {'Key': os.environ['conf_service_base_name'] + '-Tag',
+ instance_tag = {'Key': os.environ['conf_service_base_name'] + '-tag',
'Value': node_name}
tag_intance_volume(instance['Ec2InstanceId'], node_name, instance_tag)
except Exception as err:
@@ -888,37 +888,42 @@
traceback.print_exc(file=sys.stdout)
-def remove_all_iam_resources(instance_type, scientist=''):
+def remove_all_iam_resources(instance_type, project_name='', endpoint_name=''):
try:
client = boto3.client('iam')
- service_base_name = os.environ['conf_service_base_name'].lower().replace('-', '_')
+ service_base_name = os.environ['conf_service_base_name']
roles_list = []
+ if project_name:
+ start_prefix = '{}-{}-{}-'.format(service_base_name, project_name, endpoint_name)
+ else:
+ start_prefix = '{}-'.format(service_base_name)
for item in client.list_roles(MaxItems=250).get("Roles"):
- if item.get("RoleName").startswith(service_base_name + '-'):
+ if item.get("RoleName").startswith(start_prefix):
roles_list.append(item.get('RoleName'))
if roles_list:
roles_list.sort(reverse=True)
for iam_role in roles_list:
- if '-ssn-Role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
+ if '-ssn-role' in iam_role and instance_type == 'ssn' or instance_type == 'all':
try:
- client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-Policy'.format(
+ client.delete_role_policy(RoleName=iam_role, PolicyName='{0}-ssn-policy'.format(
service_base_name))
except:
- print('There is no policy {}-ssn-Policy to delete'.format(service_base_name))
+ print('There is no policy {}-ssn-policy to delete'.format(service_base_name))
role_profiles = client.list_instance_profiles_for_role(RoleName=iam_role).get('InstanceProfiles')
if role_profiles:
for i in role_profiles:
role_profile_name = i.get('InstanceProfileName')
- if role_profile_name == '{0}-ssn-Profile'.format(service_base_name):
+ if role_profile_name == '{0}-ssn-profile'.format(service_base_name):
remove_roles_and_profiles(iam_role, role_profile_name)
else:
print("There is no instance profile for {}".format(iam_role))
client.delete_role(RoleName=iam_role)
print("The IAM role {} has been deleted successfully".format(iam_role))
- if '-edge-Role' in iam_role:
- if instance_type == 'edge' and scientist in iam_role:
+ if '-edge-role' in iam_role:
+ if instance_type == 'edge' and project_name in iam_role:
remove_detach_iam_policies(iam_role, 'delete')
- role_profile_name = '{0}-{1}-edge-Profile'.format(service_base_name, scientist)
+ role_profile_name = '{0}-{1}-{2}-edge-profile'.format(service_base_name, project_name,
+ os.environ['endpoint_name'].lower())
try:
client.get_instance_profile(InstanceProfileName=role_profile_name)
remove_roles_and_profiles(iam_role, role_profile_name)
@@ -938,10 +943,11 @@
print("There is no instance profile for {}".format(iam_role))
client.delete_role(RoleName=iam_role)
print("The IAM role {} has been deleted successfully".format(iam_role))
- if '-nb-de-Role' in iam_role:
- if instance_type == 'notebook' and scientist in iam_role:
+ if '-nb-de-role' in iam_role:
+ if instance_type == 'notebook' and project_name in iam_role:
remove_detach_iam_policies(iam_role)
- role_profile_name = '{0}-{1}-{2}-nb-de-Profile'.format(service_base_name, scientist, os.environ['endpoint_name'])
+ role_profile_name = '{0}-{1}-{2}-nb-de-profile'.format(service_base_name, project_name,
+ os.environ['endpoint_name'].lower())
try:
client.get_instance_profile(InstanceProfileName=role_profile_name)
remove_roles_and_profiles(iam_role, role_profile_name)
@@ -965,22 +971,22 @@
print("There are no IAM roles to delete. Checking instance profiles...")
profile_list = []
for item in client.list_instance_profiles(MaxItems=250).get("InstanceProfiles"):
- if item.get("InstanceProfileName").startswith('{}-'.format(service_base_name)):
+ if item.get("InstanceProfileName").startswith(start_prefix):
profile_list.append(item.get('InstanceProfileName'))
if profile_list:
for instance_profile in profile_list:
- if '-ssn-Profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
+ if '-ssn-profile' in instance_profile and instance_type == 'ssn' or instance_type == 'all':
client.delete_instance_profile(InstanceProfileName=instance_profile)
print("The instance profile {} has been deleted successfully".format(instance_profile))
- if '-edge-Profile' in instance_profile:
- if instance_type == 'edge' and scientist in instance_profile:
+ if '-edge-profile' in instance_profile:
+ if instance_type == 'edge' and project_name in instance_profile:
client.delete_instance_profile(InstanceProfileName=instance_profile)
print("The instance profile {} has been deleted successfully".format(instance_profile))
if instance_type == 'all':
client.delete_instance_profile(InstanceProfileName=instance_profile)
print("The instance profile {} has been deleted successfully".format(instance_profile))
- if '-nb-de-Profile' in instance_profile:
- if instance_type == 'notebook' and scientist in instance_profile:
+ if '-nb-de-profile' in instance_profile:
+ if instance_type == 'notebook' and project_name in instance_profile:
client.delete_instance_profile(InstanceProfileName=instance_profile)
print("The instance profile {} has been deleted successfully".format(instance_profile))
if instance_type == 'all':
@@ -1037,7 +1043,7 @@
if bucket_name in item.get('Name'):
for i in client.get_bucket_tagging(Bucket=item.get('Name')).get('TagSet'):
i.get('Key')
- if i.get('Key') == os.environ['conf_service_base_name'] + '-Tag':
+ if i.get('Key') == os.environ['conf_service_base_name'].lower() + '-tag':
bucket_list.append(item.get('Name'))
for s3bucket in bucket_list:
if s3bucket:
@@ -1060,8 +1066,8 @@
try:
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
- tag_name = os.environ['conf_service_base_name'] + '-Tag'
- tag2_name = os.environ['conf_service_base_name'] + '-secondary-Tag'
+ tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
+ tag2_name = os.environ['conf_service_base_name'].lower() + '-secondary-tag'
subnets = ec2.subnets.filter(
Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}])
subnets2 = ec2.subnets.filter(
@@ -1087,7 +1093,7 @@
def remove_peering(tag_value):
try:
client = boto3.client('ec2')
- tag_name = os.environ['conf_service_base_name'] + '-Tag'
+ tag_name = os.environ['conf_service_base_name'].lower() + '-tag'
if os.environ['conf_duo_vpc_enable'] == 'true':
peering_id = client.describe_vpc_peering_connections(Filters=[
{'Name': 'tag-key', 'Values': [tag_name]},
diff --git a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
index 9c4c636..8cac3c4 100644
--- a/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/azure/actions_lib.py
@@ -521,7 +521,7 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-ssn-disk0'.format(service_base_name),
+ 'name': '{}-ssn-volume-primary'.format(service_base_name),
'create_option': 'fromImage',
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -531,7 +531,7 @@
}
},
'os_profile': {
- 'computer_name': instance_name,
+ 'computer_name': instance_name.replace('_', '-'),
'admin_username': dlab_ssh_user_name,
'linux_configuration': {
'disable_password_authentication': True,
@@ -568,7 +568,8 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+ 'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+ os.environ['endpoint_name'].lower()),
'create_option': create_option,
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -578,7 +579,7 @@
}
},
'os_profile': {
- 'computer_name': instance_name,
+ 'computer_name': instance_name.replace('_', '-'),
'admin_username': dlab_ssh_user_name,
'linux_configuration': {
'disable_password_authentication': True,
@@ -608,7 +609,8 @@
'storage_profile': {
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-{}-edge-disk0'.format(service_base_name, project_name),
+ 'name': '{}-{}-{}-edge-volume-primary'.format(service_base_name, project_name,
+ os.environ['endpoint_name'].lower()),
'create_option': create_option,
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -637,7 +639,7 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-disk0'.format(instance_name),
+ 'name': '{}-volume-primary'.format(instance_name),
'create_option': 'fromImage',
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -648,11 +650,11 @@
'data_disks': [
{
'lun': 1,
- 'name': '{}-disk1'.format(instance_name),
+ 'name': '{}-volume-secondary'.format(instance_name),
'create_option': 'empty',
'disk_size_gb': 32,
'tags': {
- 'Name': '{}-disk1'.format(instance_name)
+ 'Name': '{}-volume-secondary'.format(instance_name)
},
'managed_disk': {
'storage_account_type': instance_storage_account_type
@@ -667,7 +669,7 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-disk0'.format(instance_name),
+ 'name': '{}-volume-primary'.format(instance_name),
'create_option': 'fromImage',
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -684,7 +686,7 @@
},
'storage_profile': storage_profile,
'os_profile': {
- 'computer_name': instance_name,
+ 'computer_name': instance_name.replace('_', '-'),
'admin_username': dlab_ssh_user_name,
'linux_configuration': {
'disable_password_authentication': True,
@@ -712,7 +714,7 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-disk0'.format(instance_name),
+ 'name': '{}-volume-primary'.format(instance_name),
'create_option': 'fromImage',
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -731,7 +733,7 @@
},
'os_disk': {
'os_type': 'Linux',
- 'name': '{}-disk0'.format(instance_name),
+ 'name': '{}-volume-primary'.format(instance_name),
'create_option': 'fromImage',
'disk_size_gb': int(primary_disk_size),
'tags': tags,
@@ -748,7 +750,7 @@
},
'storage_profile': storage_profile,
'os_profile': {
- 'computer_name': instance_name,
+ 'computer_name': instance_name.replace('_', '-'),
'admin_username': dlab_ssh_user_name,
'linux_configuration': {
'disable_password_authentication': True,
@@ -1082,19 +1084,20 @@
spark_jars_paths = sudo('cat /opt/spark/conf/spark-defaults.conf | grep -e "^spark.jars " ')
except:
spark_jars_paths = None
- user_storage_account_tag = os.environ['conf_service_base_name'] + '-' + (os.environ['project_name'].lower().replace('_', '-')).\
- replace('_', '-') + '-' + os.environ['endpoint_name'].lower().replace('_', '-') + '-storage'
- shared_storage_account_tag = '{0}-{1}-shared-storage'.format(os.environ['conf_service_base_name'],
- os.environ['endpoint_name'])
+ user_storage_account_tag = "{}-{}-{}-bucket".format(os.environ['conf_service_base_name'],
+ os.environ['project_name'].lower(),
+ os.environ['endpoint_name'].lower())
+ shared_storage_account_tag = '{0}-{1}-shared-bucket'.format(os.environ['conf_service_base_name'],
+ os.environ['endpoint_name'].lower())
for storage_account in meta_lib.AzureMeta().list_storage_accounts(os.environ['azure_resource_group_name']):
if user_storage_account_tag == storage_account.tags["Name"]:
user_storage_account_name = storage_account.name
- user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
- user_storage_account_name)[0]
+ user_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+ os.environ['azure_resource_group_name'], user_storage_account_name)[0]
if shared_storage_account_tag == storage_account.tags["Name"]:
shared_storage_account_name = storage_account.name
- shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(os.environ['azure_resource_group_name'],
- shared_storage_account_name)[0]
+ shared_storage_account_key = meta_lib.AzureMeta().list_storage_keys(
+ os.environ['azure_resource_group_name'], shared_storage_account_name)[0]
if os.environ['azure_datalake_enable'] == 'false':
put(templates_dir + 'core-site-storage.xml', '/tmp/core-site.xml')
else:
diff --git a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
index ba55bc4..b1d0acb 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/actions_lib.py
@@ -39,6 +39,7 @@
import dlab.common_lib
import backoff
import ast
+import random
class GCPActions:
@@ -292,6 +293,10 @@
unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
access_configs = ''
+ if instance_class == 'edge':
+ ip_forward = True
+ else:
+ ip_forward = False
if instance_class == 'ssn' or instance_class == 'edge':
access_configs = [{
"type": "ONE_TO_ONE_NAT",
@@ -373,6 +378,7 @@
"name": instance_name,
"machineType": "zones/{}/machineTypes/{}".format(zone, instance_size),
"labels": labels,
+ "canIpForward": ip_forward,
"networkInterfaces": [
{
"network": "global/networks/{}".format(vpc_name),
@@ -554,7 +560,9 @@
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
- def set_role_to_service_account(self, service_account_name, role_name, service_base_name, role_type='custom'):
+ def set_role_to_service_account(self, service_account_name, role_name, service_base_name, role_type='custom',
+ num=0):
+ num += 1
request = GCPActions().service_resource.projects().getIamPolicy(resource=self.project, body={})
project_policy = request.execute()
unique_index = meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
@@ -577,6 +585,10 @@
try:
return request.execute()
except Exception as err:
+ if "There were concurrent policy changes. " \
+ "Please retry the whole read-modify-write with exponential backoff." in str(err) and num <= 10:
+ time.sleep(random.randint(5, 20))
+                return self.set_role_to_service_account(service_account_name, role_name, service_base_name, role_type, num)
logging.info(
"Unable to set Service account policy: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
@@ -742,12 +754,14 @@
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
- def create_image_from_instance_disks(self, primary_image_name, secondary_image_name, instance_name, zone, lables):
+ def create_image_from_instance_disks(self, primary_image_name, secondary_image_name, instance_name, zone, labels):
primary_disk_name = "projects/{0}/zones/{1}/disks/{2}".format(self.project, zone, instance_name)
secondary_disk_name = "projects/{0}/zones/{1}/disks/{2}-secondary".format(self.project, zone, instance_name)
- primary_params = {"name": primary_image_name, "sourceDisk": primary_disk_name, "labels": lables}
+ labels.update({"name": primary_image_name})
+ primary_params = {"name": primary_image_name, "sourceDisk": primary_disk_name, "labels": labels}
primary_request = self.service.images().insert(project=self.project, body=primary_params)
- secondary_params = {"name": secondary_image_name, "sourceDisk": secondary_disk_name, "labels": lables}
+ labels.update({"name": secondary_image_name})
+ secondary_params = {"name": secondary_image_name, "sourceDisk": secondary_disk_name, "labels": labels}
secondary_request = self.service.images().insert(project=self.project, body=secondary_params)
id_list=[]
try:
@@ -1288,8 +1302,8 @@
try:
templates_dir = '/root/templates/'
sudo('mkdir -p {}'.format(jars_dir))
- sudo('wget https://storage.googleapis.com/hadoop-lib/gcs/{0} -O {1}{0}'
- .format('gcs-connector-latest-hadoop2.jar', jars_dir))
+ sudo('wget https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop2-{0}.jar -O {1}'
+ 'gcs-connector-hadoop2-{0}.jar'.format(os.environ['notebook_gcs_connector_version'], jars_dir))
sudo('wget https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-yarn-server-web-proxy/2.7.4/{0} -O {1}{0}'
.format('hadoop-yarn-server-web-proxy-2.7.4.jar', jars_dir))
put(templates_dir + 'core-site.xml', '/tmp/core-site.xml')
@@ -1394,6 +1408,7 @@
def remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name):
try:
+ computational_name = os.environ['computational_name'].replace('_', '-').lower()
private = meta_lib.get_instance_private_ip_address(cluster_name, notebook_name)
env.hosts = "{}".format(private)
env.user = "{}".format(os_user)
@@ -1442,7 +1457,7 @@
sudo('sleep 5')
sudo('rm -rf /home/{}/.ensure_dir/dataengine_{}_interpreter_ensured'.format(os_user, cluster_name))
if exists('/home/{}/.ensure_dir/rstudio_dataengine_ensured'.format(os_user)):
- dlab.fab.remove_rstudio_dataengines_kernel(os.environ['computational_name'], os_user)
+ dlab.fab.remove_rstudio_dataengines_kernel(computational_name, os_user)
sudo('rm -rf /opt/' + cluster_name + '/')
print("Notebook's {} kernels were removed".format(env.hosts))
except Exception as err:
diff --git a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
index b75f0a8..431a859 100644
--- a/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
+++ b/infrastructure-provisioning/src/general/lib/gcp/meta_lib.py
@@ -172,7 +172,8 @@
traceback.print_exc(file=sys.stdout)
def get_instance(self, instance_name):
- request = self.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
+ meta = GCPMeta()
+ request = meta.service.instances().get(project=self.project, zone=os.environ['gcp_zone'],
instance=instance_name)
try:
return request.execute()
@@ -183,8 +184,8 @@
raise err
except Exception as err:
logging.info(
- "Unable to get Firewall: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
- append_result(str({"error": "Unable to get Firewall",
+ "Unable to get instance: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
+ append_result(str({"error": "Unable to get instance",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
@@ -210,7 +211,7 @@
def get_instance_public_ip_by_name(self, instance_name):
try:
- result = GCPMeta().get_instance(instance_name)
+ result = self.get_instance(instance_name)
if result:
for i in result.get('networkInterfaces'):
for j in i.get('accessConfigs'):
@@ -255,10 +256,6 @@
if service_account['displayName'] == service_account_name:
service_account_email = service_account['email']
response = service_account_email[:service_account_email.find('@')][-5:]
- if response == '':
- print("No service account with" + service_account_name + "display name.")
- else:
- print("Service account " + service_account_name + " has " + response + " index.")
return response
else:
print("No service accounts list received.")
@@ -274,7 +271,7 @@
traceback.print_exc(file=sys.stdout)
def get_service_account(self, service_account_name, service_base_name):
- unique_index = GCPMeta().get_index_by_service_account_name(service_account_name)
+ unique_index = self.get_index_by_service_account_name(service_account_name)
if unique_index == '':
service_account_email = "{}@{}.iam.gserviceaccount.com".format(service_base_name, self.project)
else:
@@ -347,7 +344,7 @@
def get_private_ip_address(self, instance_name):
try:
- result = GCPMeta().get_instance(instance_name)
+ result = self.get_instance(instance_name)
for i in result['networkInterfaces']:
return i['networkIP']
except Exception as err:
@@ -704,7 +701,7 @@
def dataproc_waiter(self, labels):
if os.path.exists(
- '/response/.emr_creating_' + os.environ['exploratory_name']) or GCPMeta().get_not_configured_dataproc(
+ '/response/.emr_creating_' + os.environ['exploratory_name']) or self.get_not_configured_dataproc(
os.environ['notebook_instance_name']):
with hide('stderr', 'running', 'warnings'):
local("echo 'Some Dataproc cluster is still being created/terminated, waiting..'")
@@ -742,10 +739,10 @@
try:
private_list_ip = []
if conf_type == 'edge_node' or conf_type == 'exploratory':
- private_list_ip.append(GCPMeta().get_private_ip_address(
+ private_list_ip.append(self.get_private_ip_address(
instance_id))
elif conf_type == 'computational_resource':
- instance_list = GCPMeta().get_list_instances_by_label(
+ instance_list = self.get_list_instances_by_label(
os.environ['gcp_zone'], instance_id)
for instance in instance_list.get('items'):
private_list_ip.append(instance.get('networkInterfaces')[0].get('networkIP'))
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
index c4f8ff4..c70e9a9 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/common_lib.py
@@ -27,32 +27,47 @@
import os
import time
+
def manage_pkg(command, environment, requisites):
try:
- allow = False
- counter = 0
- while not allow:
- if counter > 60:
+ attempt = 0
+ installed = False
+ while not installed:
+ print('Pkg installation attempt: {}'.format(attempt))
+ if attempt > 60:
print("Notebook is broken please recreate it.")
sys.exit(1)
else:
- print('Package manager is:')
- if environment == 'remote':
- if sudo('pgrep "^apt" -a && echo "busy" || echo "ready"') == 'busy':
- counter += 1
- time.sleep(10)
- else:
- allow = True
- sudo('apt-get {0} {1}'.format(command, requisites))
- elif environment == 'local':
- if local('sudo pgrep "^apt" -a && echo "busy" || echo "ready"', capture=True) == 'busy':
- counter += 1
- time.sleep(10)
- else:
- allow = True
- local('sudo apt-get {0} {1}'.format(command, requisites), capture=True)
- else:
- print('Wrong environment')
+ try:
+ allow = False
+ counter = 0
+ while not allow:
+ if counter > 60:
+ print("Notebook is broken please recreate it.")
+ sys.exit(1)
+ else:
+ print('Package manager is:')
+ if environment == 'remote':
+ if sudo('pgrep "^apt" -a && echo "busy" || echo "ready"') == 'busy':
+ counter += 1
+ time.sleep(10)
+ else:
+ allow = True
+ sudo('apt-get {0} {1}'.format(command, requisites))
+ elif environment == 'local':
+ if local('sudo pgrep "^apt" -a && echo "busy" || echo "ready"', capture=True) == 'busy':
+ counter += 1
+ time.sleep(10)
+ else:
+ allow = True
+ local('sudo apt-get {0} {1}'.format(command, requisites), capture=True)
+ else:
+ print('Wrong environment')
+ installed = True
+ except:
+            print("Will try to install with next attempt.")
+ sudo('dpkg --configure -a')
+ attempt += 1
except:
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
index d73d7ee..582d58e 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/edge_lib.py
@@ -147,15 +147,24 @@
sudo('./configure')
sudo('make build')
sudo('make install')
- sudo('luarocks install lua-resty-jwt')
- sudo('luarocks install lua-resty-session')
- sudo('luarocks install lua-resty-http')
- sudo('luarocks install lua-resty-openidc')
- sudo('luarocks install luacrypto')
- sudo('luarocks install lua-cjson')
- sudo('luarocks install lua-resty-core')
- sudo('luarocks install random')
- sudo('luarocks install lua-resty-string')
+ sudo('wget https://luarocks.org/manifests/cdbattags/lua-resty-jwt-0.2.0-0.src.rock')
+ sudo('luarocks build lua-resty-jwt-0.2.0-0.src.rock')
+ sudo('wget https://luarocks.org/manifests/bungle/lua-resty-session-2.26-1.src.rock')
+ sudo('luarocks build lua-resty-session-2.26-1.src.rock')
+ sudo('wget https://luarocks.org/manifests/pintsized/lua-resty-http-0.15-0.src.rock')
+ sudo('luarocks build lua-resty-http-0.15-0.src.rock')
+ sudo('wget https://luarocks.org/manifests/hanszandbelt/lua-resty-openidc-1.7.2-1.src.rock')
+ sudo('luarocks build lua-resty-openidc-1.7.2-1.src.rock')
+ sudo('wget https://luarocks.org/manifests/starius/luacrypto-0.3.2-2.src.rock')
+ sudo('luarocks build luacrypto-0.3.2-2.src.rock')
+ sudo('wget https://luarocks.org/manifests/openresty/lua-cjson-2.1.0.6-1.src.rock')
+ sudo('luarocks build lua-cjson-2.1.0.6-1.src.rock')
+ sudo('wget https://luarocks.org/manifests/avlubimov/lua-resty-core-0.1.17-4.src.rock')
+ sudo('luarocks build lua-resty-core-0.1.17-4.src.rock')
+ sudo('wget https://luarocks.org/manifests/hjpotter92/random-1.1-0.rockspec')
+ sudo('luarocks install random-1.1-0.rockspec')
+ sudo('wget https://luarocks.org/manifests/rsander/lua-resty-string-0.09-0.rockspec')
+ sudo('luarocks install lua-resty-string-0.09-0.rockspec')
sudo('useradd -r nginx')
sudo('rm -f /etc/nginx/nginx.conf')
diff --git a/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py b/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
index 814421e..bb073c3 100644
--- a/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
+++ b/infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py
@@ -105,10 +105,14 @@
try:
random_file_part = id_generator(size=20)
if not exists("/etc/nginx/conf.d/nginx_proxy.conf"):
+ sudo('useradd -r nginx')
sudo('rm -f /etc/nginx/conf.d/*')
+ put(config['nginx_template_dir'] + 'ssn_nginx.conf', '/tmp/nginx.conf')
put(config['nginx_template_dir'] + 'nginx_proxy.conf', '/tmp/nginx_proxy.conf')
sudo("sed -i 's|SSN_HOSTNAME|" + hostname + "|' /tmp/nginx_proxy.conf")
+ sudo('mv /tmp/nginx.conf ' + dlab_path + 'tmp/')
sudo('mv /tmp/nginx_proxy.conf ' + dlab_path + 'tmp/')
+ sudo('\cp ' + dlab_path + 'tmp/nginx.conf /etc/nginx/')
sudo('\cp ' + dlab_path + 'tmp/nginx_proxy.conf /etc/nginx/conf.d/')
sudo('mkdir -p /etc/nginx/locations')
sudo('rm -f /etc/nginx/sites-enabled/default')
diff --git a/infrastructure-provisioning/src/general/lib/os/fab.py b/infrastructure-provisioning/src/general/lib/os/fab.py
index a1e1a16..cd15d42 100644
--- a/infrastructure-provisioning/src/general/lib/os/fab.py
+++ b/infrastructure-provisioning/src/general/lib/os/fab.py
@@ -32,7 +32,7 @@
import dlab.actions_lib
import re
import traceback
-from dlab.common_lib import manage_pkg
+from dlab.common_lib import *
def ensure_pip(requisites):
@@ -41,6 +41,7 @@
sudo('echo PATH=$PATH:/usr/local/bin/:/opt/spark/bin/ >> /etc/profile')
sudo('echo export PATH >> /etc/profile')
sudo('pip install -UI pip=={} --no-cache-dir'.format(os.environ['conf_pip_version']))
+ sudo('pip install --upgrade setuptools')
sudo('pip install -U {} --no-cache-dir'.format(requisites))
sudo('touch /home/{}/.ensure_dir/pip_path_added'.format(os.environ['conf_os_user']))
except:
@@ -119,6 +120,12 @@
def append_result(error, exception=''):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
+ if exception:
+ error_message = "[Error-{}]: {}. Exception: {}".format(st, error, str(exception))
+ print(error_message)
+ else:
+ error_message = "[Error-{}]: {}.".format(st, error)
+ print(error_message)
with open('/root/result.json', 'a+') as f:
text = f.read()
if len(text) == 0:
@@ -127,10 +134,7 @@
f.write(res)
with open("/root/result.json") as f:
data = json.load(f)
- if exception:
- data['error'] = data['error'] + " [Error-" + st + "]:" + error + " Exception: " + str(exception)
- else:
- data['error'] = data['error'] + " [Error-" + st + "]:" + error
+ data['error'] = data['error'] + error_message
with open("/root/result.json", 'w') as f:
json.dump(data, f)
print(data)
@@ -502,7 +506,7 @@
def install_ungit(os_user, notebook_name, edge_ip):
if not exists('/home/{}/.ensure_dir/ungit_ensured'.format(os_user)):
try:
- sudo('npm -g install ungit@{}'.format(os.environ['notebook_ungit_version']))
+ manage_npm_pkg('-g install ungit@{}'.format(os.environ['notebook_ungit_version']))
put('/root/templates/ungit.service', '/tmp/ungit.service')
sudo("sed -i 's|OS_USR|{}|' /tmp/ungit.service".format(os_user))
http_proxy = run('echo $http_proxy')
@@ -551,7 +555,7 @@
run('git config --global https.proxy $https_proxy')
-def install_inactivity_checker(os_user, ip_adress, rstudio=False):
+def install_inactivity_checker(os_user, ip_address, rstudio=False):
if not exists('/home/{}/.ensure_dir/inactivity_ensured'.format(os_user)):
try:
if not exists('/opt/inactivity'):
@@ -562,7 +566,7 @@
put('/root/templates/inactive_rs.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
else:
put('/root/templates/inactive.sh', '/opt/inactivity/inactive.sh', use_sudo=True)
- sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_adress))
+ sudo("sed -i 's|IP_ADRESS|{}|g' /opt/inactivity/inactive.sh".format(ip_address))
sudo("chmod 755 /opt/inactivity/inactive.sh")
sudo("chown root:root /etc/systemd/system/inactive.service")
sudo("chown root:root /etc/systemd/system/inactive.timer")
@@ -868,3 +872,26 @@
except Exception as err:
print("Failed configure superset: " + str(err))
sys.exit(1)
+
+def manage_npm_pkg(command):
+ try:
+ npm_count = 0
+ installed = False
+ npm_registry = ['https://registry.npmjs.org/', 'https://registry.npmjs.com/']
+ while not installed:
+ if npm_count > 60:
+ print("NPM registry is not available, please try later")
+ sys.exit(1)
+ else:
+ try:
+ if npm_count % 2 == 0:
+ sudo('npm config set registry {}'.format(npm_registry[0]))
+ else:
+ sudo('npm config set registry {}'.format(npm_registry[1]))
+ sudo('npm {}'.format(command))
+ installed = True
+ except:
+ npm_count += 1
+ time.sleep(50)
+ except:
+ sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
index 1e9ef5c..1d5cb04 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_notebook_image.py
@@ -21,30 +21,33 @@
#
# ******************************************************************************
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
import json
import uuid
+import os
if __name__ == "__main__":
try:
image_conf = dict()
- create_aws_config_files()
- image_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ dlab.actions_lib.create_aws_config_files()
+ image_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
image_conf['project_name'] = os.environ['project_name']
image_conf['project_tag'] = os.environ['project_name']
+ image_conf['endpoint_name'] = os.environ['endpoint_name']
image_conf['instance_name'] = os.environ['notebook_instance_name']
- image_conf['instance_tag'] = '{}-Tag'.format(image_conf['service_base_name'])
+ image_conf['instance_tag'] = '{}-tag'.format(image_conf['service_base_name'])
image_conf['application'] = os.environ['application']
- image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
- image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
- image_conf['project_name'],
- image_conf['application'],
- image_conf['image_name']).lower()
+ image_conf['image_name'] = os.environ['notebook_image_name']
+ image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+ image_conf['project_name'],
+ image_conf['endpoint_name'],
+ image_conf['application'],
+ image_conf['image_name'])
image_conf['tags'] = {"Name": image_conf['full_image_name'],
"SBN": image_conf['service_base_name'],
"Project": image_conf['project_name'],
@@ -52,16 +55,19 @@
"FIN": image_conf['full_image_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- ami_id = get_ami_id_by_name(image_conf['full_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'])
if ami_id == '':
try:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+ ';project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'])
except KeyError:
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=image_conf['instance_tag'],
- instance_name=image_conf['instance_name'],
- image_name=image_conf['full_image_name'],
- tags=json.dumps(image_conf['tags']))
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+ os.environ['project_name'], os.environ['endpoint_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(tag_name=image_conf['instance_tag'],
+ instance_name=image_conf['instance_name'],
+ image_name=image_conf['full_image_name'],
+ tags=json.dumps(image_conf['tags']))
print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
with open("/root/result.json", 'w') as result:
@@ -73,5 +79,5 @@
"Action": "Create image from notebook"}
result.write(json.dumps(res))
except Exception as err:
- append_result("Failed to create image from notebook", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to create image from notebook", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
index d7f5ee0..8b7f038 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_policy.py
@@ -35,6 +35,7 @@
parser.add_argument('--edge_role_name', type=str, default='')
parser.add_argument('--notebook_role_name', type=str, default='')
parser.add_argument('--region', type=str, default='')
+parser.add_argument('--endpoint_name', type=str, default='')
parser.add_argument('--user_predefined_s3_policies', type=str, default='')
args = parser.parse_args()
@@ -64,18 +65,19 @@
for i in list:
if i.get('PolicyName') in list_predefined_policies:
list_policies_arn.append(i.get('Arn'))
- response = iam.create_policy(PolicyName='{}-{}-strict_to_S3-Policy'.
- format(args.service_base_name, args.username), PolicyDocument=policy)
+ response = iam.create_policy(PolicyName='{}-{}-{}-strict_to_S3-Policy'.
+ format(args.service_base_name, args.username, args.endpoint_name),
+ PolicyDocument=policy)
time.sleep(10)
list_policies_arn.append(response.get('Policy').get('Arn'))
except botocore.exceptions.ClientError as cle:
if cle.response['Error']['Code'] == 'EntityAlreadyExists':
- print("Policy {}-{}-strict_to_S3-Policy already exists. Reusing it.".
- format(args.service_base_name, args.username))
+ print("Policy {}-{}-{}-strict_to_S3-Policy already exists. Reusing it.".
+ format(args.service_base_name, args.username, args.endpoint_name))
list = iam.list_policies().get('Policies')
for i in list:
- if '{}-{}-strict_to_S3-Policy'.format(
- args.service_base_name, args.username) == i.get('PolicyName') or (
+ if '{}-{}-{}-strict_to_S3-Policy'.format(
+ args.service_base_name, args.username, args.endpoint_name) == i.get('PolicyName') or (
args.user_predefined_s3_policies != 'None' and i.get('PolicyName') in
list_predefined_policies):
list_policies_arn.append(i.get('Arn'))
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
index 5469e2a..b4dc3c6 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_create_subnet.py
@@ -132,7 +132,7 @@
print("Associating route_table with the subnet")
ec2 = boto3.resource('ec2')
if os.environ['conf_duo_vpc_enable'] == 'true':
- rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-Tag', args.infra_tag_value)
+ rt = get_route_table_by_tag(args.infra_tag_value + '-secondary-tag', args.infra_tag_value)
else:
rt = get_route_table_by_tag(args.infra_tag_name, args.infra_tag_value)
route_table = ec2.RouteTable(rt)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
index bd7d266..8051e6d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_download_git_certfile.py
@@ -43,7 +43,7 @@
env.host_string = env.user + "@" + env.hosts
service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ os.environ['conf_service_base_name'][:20], '-', True)
project_name = os.environ['project_name']
endpoint_name = os.environ['endpoint_name']
bucket_name = ('{0}-{1}-{2}-bucket'.format(service_base_name,
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
index 6fe139b..1d0df4f 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,21 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
+from fabric.api import *
+
+
+def clear_resources():
+ emr_id = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+ dlab.actions_lib.terminate_emr(emr_id)
+    dlab.actions_lib.remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'],
+ os.environ['notebook_instance_name'], os.environ['conf_os_user'],
+ notebook_config['key_path'], os.environ['emr_version'])
if __name__ == "__main__":
@@ -38,38 +48,45 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- # generating variables dictionary
- create_aws_config_files()
- print('Generating infrastructure names and tags')
- notebook_config = dict()
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['notebook_name'] = os.environ['notebook_instance_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
- notebook_config['project_name'] = os.environ['project_name']
- notebook_config['endpoint_name'] = os.environ['endpoint_name']
- notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
- notebook_config['project_name'],
- notebook_config['endpoint_name'])).lower().replace('_', '-')
- notebook_config['cluster_name'] = get_not_configured_emr(notebook_config['tag_name'],
- notebook_config['notebook_name'], True)
- notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
- notebook_config['notebook_name']).get('Private')
- notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
- notebook_config['cluster_id'] = get_emr_id_by_name(notebook_config['cluster_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- if os.environ['application'] == 'deeplearning':
- application = 'jupyter'
- else:
- application = os.environ['application']
+ try:
+ # generating variables dictionary
+ dlab.actions_lib.create_aws_config_files()
+ print('Generating infrastructure names and tags')
+ notebook_config = dict()
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ notebook_config['notebook_name'] = os.environ['notebook_instance_name']
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name']
+ ).lower().replace('_', '-')
+ notebook_config['cluster_name'] = dlab.meta_lib.get_not_configured_emr(notebook_config['tag_name'],
+ notebook_config['notebook_name'], True)
+ notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
+ notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+ notebook_config['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(notebook_config['cluster_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ os.environ['project_name'], os.environ['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ if os.environ['application'] == 'deeplearning':
+ application = 'jupyter'
+ else:
+ application = os.environ['application']
+ except Exception as err:
+ clear_resources()
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
- params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} --emr_excluded_spark_properties {} --project_name {} --os_user {} --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+ params = "--bucket {} --cluster_name {} --emr_version {} --keyfile {} --notebook_ip {} --region {} " \
+ "--emr_excluded_spark_properties {} --project_name {} --os_user {} --edge_hostname {} " \
+ "--proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
.format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['emr_version'],
notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['aws_region'],
os.environ['emr_excluded_spark_properties'], os.environ['project_name'],
@@ -77,17 +94,15 @@
os.environ['application'], os.environ['conf_pypi_mirror'])
try:
local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
- remove_emr_tag(notebook_config['cluster_id'], ['State'])
- tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+ dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+ dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+ os.environ['conf_tag_resource_id'])
except:
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed installing EMR kernels.", str(err))
- emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
- terminate_emr(emr_id)
- remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
- os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+ dlab.fab.append_result("Failed installing EMR kernels.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -101,17 +116,15 @@
os.environ['conf_os_user'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
- remove_emr_tag(notebook_config['cluster_id'], ['State'])
- tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'], os.environ['conf_tag_resource_id'])
+ dlab.actions_lib.remove_emr_tag(notebook_config['cluster_id'], ['State'])
+ dlab.actions_lib.tag_emr_volume(notebook_config['cluster_id'], notebook_config['cluster_name'],
+ os.environ['conf_tag_resource_id'])
except:
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed to configure Spark.", str(err))
- emr_id = get_emr_id_by_name(notebook_config['cluster_name'])
- terminate_emr(emr_id)
- remove_kernels(notebook_config['cluster_name'], notebook_config['tag_name'], os.environ['notebook_instance_name'],
- os.environ['conf_os_user'], notebook_config['key_path'], os.environ['emr_version'])
+ dlab.fab.append_result("Failed to configure Spark.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -121,6 +134,7 @@
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
index 0cd06be..c80328b 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
+from fabric.api import *
+
+
+def clear_resources():
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
+ for i in range(notebook_config['instance_count'] - 1):
+ slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], slave_name)
if __name__ == "__main__":
@@ -41,25 +50,27 @@
try:
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
- try:
+ if 'exploratory_name' in os.environ:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
+ else:
notebook_config['exploratory_name'] = ''
- try:
+ if 'computational_name' in os.environ:
notebook_config['computational_name'] = os.environ['computational_name']
- except:
+ else:
notebook_config['computational_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
notebook_config['region'] = os.environ['aws_region']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['project_name'] = os.environ['project_name']
- notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
- '-de-' + notebook_config['exploratory_name'] + '-' + \
- notebook_config['computational_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['computational_name'])
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -67,21 +78,18 @@
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
- notebook_config['spark_master_ip'] = get_instance_private_ip_address(
+ notebook_config['spark_master_ip'] = dlab.meta_lib.get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['master_node_name'])
- notebook_config['notebook_ip'] = get_instance_private_ip_address(
+ notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to get ip address", str(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
- remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(notebook_config['tag_name'], slave_name)
- append_result("Failed to generate infrastructure names", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
@@ -99,11 +107,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(notebook_config['tag_name'], slave_name)
- append_result("Failed installing Dataengine kernels.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
@@ -123,11 +128,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- remove_ec2(notebook_config['tag_name'], notebook_config['master_node_name'])
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(notebook_config['tag_name'], slave_name)
- append_result("Failed to configure Spark.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
@@ -136,6 +138,7 @@
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
index 7e5e0ef..5c481ac 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_prepare_notebook.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
import os
import argparse
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -42,92 +44,107 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
+ try:
+ # generating variables dictionary
+ dlab.actions_lib.create_aws_config_files()
+ notebook_config = dict()
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_status = dlab.meta_lib.get_instance_status(notebook_config['service_base_name'] + '-tag',
+ notebook_config['edge_name'])
+ if edge_status != 'running':
+ logging.info('ERROR: Edge node is unavailable! Aborting...')
+ print('ERROR: Edge node is unavailable! Aborting...')
+ notebook_config['ssn_hostname'] = dlab.meta_lib.get_instance_hostname(
+ '{}-tag'.format(notebook_config['service_base_name']),
+ '{}-ssn'.format(notebook_config['service_base_name']))
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ notebook_config['ssn_hostname'])
+ dlab.fab.append_result("Edge node is unavailable")
+ sys.exit(1)
+ print('Generating infrastructure names and tags')
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
- # generating variables dictionary
- create_aws_config_files()
- notebook_config = dict()
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['edge_name'] = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_status = get_instance_status(notebook_config['service_base_name'] + '-Tag', notebook_config['edge_name'])
- if edge_status != 'running':
- logging.info('ERROR: Edge node is unavailable! Aborting...')
- print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = get_instance_hostname(notebook_config['service_base_name'] + '-Tag', notebook_config['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
- append_result("Edge node is unavailable")
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+ os.environ['application'])
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'], os.environ['notebook_image_name']) if (x != 'None' and x != '')
+ else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
+ print('Searching pre-configured images')
+ notebook_config['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+ os.environ['conf_os_family'])])
+ image_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
+ if image_id != '':
+ notebook_config['ami_id'] = image_id
+ print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
+ else:
+ os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+ print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
+
+ tag = {"Key": notebook_config['tag_name'],
+ "Value": "{}-{}-{}-subnet".format(notebook_config['service_base_name'], notebook_config['project_name'],
+ notebook_config['endpoint_name'])}
+ notebook_config['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+
+ with open('/root/result.json', 'w') as f:
+ data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+ json.dump(data, f)
+
+ try:
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ notebook_config['project_name'], notebook_config['endpoint_name'], os.environ['conf_additional_tags'])
+ except KeyError:
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+
+ print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
- print('Generating infrastructure names and tags')
- try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
-
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
-
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
- os.environ['project_name'],
- os.environ['application'],
- os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
- else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
- print('Searching pre-configured images')
- notebook_config['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
- image_id = get_ami_id_by_name(notebook_config['notebook_image_name'], 'available')
- if image_id != '':
- notebook_config['ami_id'] = image_id
- print('Pre-configured image found. Using: {}'.format(notebook_config['ami_id']))
- else:
- os.environ['notebook_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
- print('No pre-configured image found. Using default one: {}'.format(notebook_config['ami_id']))
-
- tag = {"Key": notebook_config['tag_name'],
- "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
- notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
-
- with open('/root/result.json', 'w') as f:
- data = {"notebook_name": notebook_config['instance_name'], "error": ""}
- json.dump(data, f)
-
- try:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
- except KeyError:
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
-
- print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
# launching instance for notebook server
try:
logging.info('[CREATE NOTEBOOK INSTANCE]')
print('[CREATE NOTEBOOK INSTANCE]')
- params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} --instance_disk_size {} --primary_disk_size {}" \
- .format(notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
- notebook_config['key_name'], get_security_group_by_name(notebook_config['security_group_name']),
- get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
- notebook_config['role_profile_name'],
- notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
- os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
+ params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} " \
+ "--iam_profile {} --infra_tag_name {} --infra_tag_value {} --instance_class {} " \
+ "--instance_disk_size {} --primary_disk_size {}" .format(
+ notebook_config['instance_name'], notebook_config['ami_id'], notebook_config['instance_type'],
+ notebook_config['key_name'],
+ dlab.meta_lib.get_security_group_by_name(notebook_config['security_group_name']),
+ dlab.meta_lib.get_subnet_by_cidr(notebook_config['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+ notebook_config['role_profile_name'],
+ notebook_config['tag_name'], notebook_config['instance_name'], instance_class,
+ os.environ['notebook_disk_size'], notebook_config['primary_disk_size'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -135,6 +152,6 @@
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed to create instance.", str(err))
+ dlab.fab.append_result("Failed to create instance.", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
index 9a60aa2..d153082 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_start_notebook.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
import argparse
+from fabric.api import *
if __name__ == "__main__":
@@ -40,13 +42,12 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
try:
logging.info('[START NOTEBOOK]')
@@ -54,10 +55,10 @@
params = "--tag_name {} --nb_tag_value {}".format(notebook_config['tag_name'], notebook_config['notebook_name'])
try:
print("Starting notebook")
- start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
+ dlab.actions_lib.start_ec2(notebook_config['tag_name'], notebook_config['notebook_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to start notebook.", str(err))
+ dlab.fab.append_result("Failed to start notebook.", str(err))
raise Exception
except:
sys.exit(1)
@@ -65,8 +66,8 @@
try:
logging.info('[SETUP USER GIT CREDENTIALS]')
print('[SETUP USER GIT CREDENTIALS]')
- notebook_config['notebook_ip'] = get_instance_ip_address(notebook_config['tag_name'],
- notebook_config['notebook_name']).get('Private')
+ notebook_config['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -74,7 +75,7 @@
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to setup git credentials.", str(err))
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
raise Exception
except:
sys.exit(1)
@@ -88,15 +89,15 @@
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
raise Exception
except:
sys.exit(1)
-
try:
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['notebook_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['notebook_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['notebook_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -110,8 +111,8 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
index dc61a7a..679d4eb 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_stop_notebook.py
@@ -24,13 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+import traceback
import os
import uuid
-from dlab.meta_lib import *
-from dlab.actions_lib import *
import boto3
import argparse
import sys
@@ -39,7 +39,7 @@
def stop_notebook(nb_tag_value, bucket_name, tag_name, ssh_user, key_path):
print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
try:
- clusters_list = get_emr_list(nb_tag_value, 'Value')
+ clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
if clusters_list:
for cluster_id in clusters_list:
computational_name = ''
@@ -51,11 +51,12 @@
for tag in cluster.get('Tags'):
if tag.get('Key') == 'ComputationalName':
computational_name = tag.get('Value')
- s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+ dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
print("The bucket {} has been cleaned successfully".format(bucket_name))
- terminate_emr(cluster_id)
+ dlab.actions_lib.terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
- remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version, computational_name)
+ dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path, emr_version,
+ computational_name)
print("{} kernels have been removed from notebook successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
@@ -66,22 +67,22 @@
try:
cluster_list = []
master_ids = []
- cluster_instances_list = get_ec2_list('dataengine_notebook_name', nb_tag_value)
+ cluster_instances_list = dlab.meta_lib.get_ec2_list('dataengine_notebook_name', nb_tag_value)
for instance in cluster_instances_list:
for tag in instance.tags:
if tag['Key'] == 'Type' and tag['Value'] == 'master':
master_ids.append(instance.id)
for id in master_ids:
- for tag in get_instance_attr(id, 'tags'):
+ for tag in dlab.meta_lib.get_instance_attr(id, 'tags'):
if tag['Key'] == 'Name':
cluster_list.append(tag['Value'].replace(' ', '')[:-2])
- stop_ec2('dataengine_notebook_name', nb_tag_value)
+ dlab.actions_lib.stop_ec2('dataengine_notebook_name', nb_tag_value)
except:
sys.exit(1)
print("Stopping notebook")
try:
- stop_ec2(tag_name, nb_tag_value)
+ dlab.actions_lib.stop_ec2(tag_name, nb_tag_value)
except:
sys.exit(1)
@@ -95,18 +96,18 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['project_name'] = os.environ['project_name']
notebook_config['endpoint_name'] = os.environ['endpoint_name']
- notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+ notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
- notebook_config['endpoint_name'])).lower().replace('_', '-')
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['endpoint_name']
+ ).lower().replace('_', '-')
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
logging.info('[STOP NOTEBOOK]')
@@ -116,7 +117,7 @@
os.environ['conf_os_user'], notebook_config['key_path'])
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to stop notebook.", str(err))
+ dlab.fab.append_result("Failed to stop notebook.", str(err))
sys.exit(1)
@@ -128,7 +129,7 @@
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
index caeaf70..c199089 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook.py
@@ -24,17 +24,19 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
+import boto3
import uuid
def terminate_nb(nb_tag_value, bucket_name, tag_name):
print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
try:
- clusters_list = get_emr_list(nb_tag_value, 'Value')
+ clusters_list = dlab.meta_lib.get_emr_list(nb_tag_value, 'Value')
if clusters_list:
for cluster_id in clusters_list:
client = boto3.client('emr')
@@ -42,10 +44,10 @@
cluster = cluster.get("Cluster")
emr_name = cluster.get('Name')
print('Cleaning bucket from configs for cluster {}'.format(emr_name))
- s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+ dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
print("The bucket {} has been cleaned successfully".format(bucket_name))
print('Terminating cluster {}'.format(emr_name))
- terminate_emr(cluster_id)
+ dlab.actions_lib.terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
@@ -54,13 +56,13 @@
print("Terminating data engine cluster")
try:
- remove_ec2('dataengine_notebook_name', nb_tag_value)
+ dlab.actions_lib.remove_ec2('dataengine_notebook_name', nb_tag_value)
except:
sys.exit(1)
print("Terminating notebook")
try:
- remove_ec2(tag_name, nb_tag_value)
+ dlab.actions_lib.remove_ec2(tag_name, nb_tag_value)
except:
sys.exit(1)
@@ -73,18 +75,18 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['project_name'] = os.environ['project_name']
notebook_config['endpoint_name'] = os.environ['endpoint_name']
- notebook_config['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
+ notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
- notebook_config['endpoint_name'])).lower().replace('_', '-')
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['endpoint_name']
+ ).lower().replace('_', '-')
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
try:
logging.info('[TERMINATE NOTEBOOK]')
@@ -93,7 +95,7 @@
terminate_nb(notebook_config['notebook_name'], notebook_config['bucket_name'], notebook_config['tag_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate notebook.", str(err))
+ dlab.fab.append_result("Failed to terminate notebook.", str(err))
raise Exception
except:
sys.exit(1)
@@ -106,6 +108,6 @@
"Action": "Terminate notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
index ce76a1e..3da4f63 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/common_terminate_notebook_image.py
@@ -21,22 +21,23 @@
#
# ******************************************************************************
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
import json
+import os
if __name__ == "__main__":
try:
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
image_conf = dict()
image_conf['full_image_name'] = os.environ['notebook_image_name']
- image_id = get_ami_id_by_name(image_conf['full_image_name'], 'available')
+ image_id = dlab.meta_lib.get_ami_id_by_name(image_conf['full_image_name'], 'available')
if image_id != '':
- deregister_image(image_conf['full_image_name'])
+ dlab.actions_lib.deregister_image(image_conf['full_image_name'])
with open("/root/result.json", 'w') as result:
res = {"notebook_image_name": image_conf['full_image_name'],
@@ -44,5 +45,5 @@
"Action": "Delete existing notebook image"}
result.write(json.dumps(res))
except Exception as err:
- append_result("Failed to delete existing notebook image", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
index 20206de..9e9fb40 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_configure.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import logging
@@ -53,9 +54,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create dlab ssh user.", str(err))
- terminate_emr(emr_conf['cluster_id'])
+ dlab.fab.append_result("Failed to create dlab ssh user.", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
sys.exit(1)
# configuring proxy on Data Engine service
@@ -72,27 +72,27 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- terminate_emr(emr_conf['cluster_id'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
sys.exit(1)
try:
logging.info('[CONFIGURE DATAENGINE SERVICE]')
print('[CONFIGURE DATAENGINE SERVICE]')
try:
- configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'], emr_conf['key_path'])
+ dlab.fab.configure_data_engine_service_pip(emr_conf['instance_ip'], emr_conf['os_user'],
+ emr_conf['key_path'])
env['connection_attempts'] = 100
env.key_filename = emr_conf['key_path']
env.host_string = emr_conf['os_user'] + '@' + emr_conf['instance_ip']
- sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> /etc/yum/pluginconf.d/priorities.conf')
+ sudo('echo "[main]" > /etc/yum/pluginconf.d/priorities.conf ; echo "enabled = 0" >> '
+ '/etc/yum/pluginconf.d/priorities.conf')
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure dataengine service.", str(err))
- terminate_emr(emr_conf['cluster_id'])
+ dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
sys.exit(1)
@@ -130,12 +130,11 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed edge reverse proxy template", str(err))
- terminate_emr(emr_conf['cluster_id'])
+ dlab.fab.append_result("Failed edge reverse proxy template", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
sys.exit(1)
try:
@@ -150,9 +149,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key", str(err))
- terminate_emr(emr_conf['cluster_id'])
+ dlab.fab.append_result("Failed installing users key", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
sys.exit(1)
@@ -163,75 +161,78 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
+
try:
- os.environ['exploratory_name']
- except:
- os.environ['exploratory_name'] = ''
- create_aws_config_files()
- print('Generating infrastructure names and tags')
- emr_conf = dict()
- try:
- emr_conf['exploratory_name'] = os.environ['exploratory_name']
- except:
- emr_conf['exploratory_name'] = ''
- try:
- emr_conf['computational_name'] = os.environ['computational_name']
- except:
- emr_conf['computational_name'] = ''
- emr_conf['apps'] = 'Hadoop Hive Hue Spark'
- emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- emr_conf['project_name'] = os.environ['project_name']
- emr_conf['endpoint_name'] = os.environ['endpoint_name']
- emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
- emr_conf['key_name'] = os.environ['conf_key_name']
- emr_conf['region'] = os.environ['aws_region']
- emr_conf['release_label'] = os.environ['emr_version']
- emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
- emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
- emr_conf['instance_count'] = os.environ['emr_instance_count']
- emr_conf['notebook_ip'] = get_instance_ip_address(emr_conf['tag_name'],
- os.environ['notebook_instance_name']).get('Private')
- emr_conf['network_type'] = os.environ['conf_network_type']
- emr_conf['role_service_name'] = os.environ['emr_service_role']
- emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
- emr_conf['tags'] = 'Name=' + emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
- emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + args.uuid + \
- ', ' + emr_conf['service_base_name'] + '-Tag=' + emr_conf['service_base_name'] + '-' + \
- os.environ['project_name'] + '-des-' + emr_conf['exploratory_name'] + '-' + \
- emr_conf['computational_name'] + '-' + args.uuid + \
- ', Notebook=' + os.environ['notebook_instance_name'] + ', State=not-configured, Endpoint_tag=' + emr_conf['endpoint_name']
- emr_conf['cluster_name'] = emr_conf['service_base_name'] + '-' + os.environ['project_name'] + '-des-' + \
- emr_conf['exploratory_name'] + '-' + emr_conf['computational_name'] + '-' + \
- args.uuid
- emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
- emr_conf['endpoint_name'])).lower().replace('_', '-')
- tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']), "Value": "{}-{}-subnet".format(
- emr_conf['service_base_name'], os.environ['project_name'])}
- emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
- emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
- emr_conf['all_ip_cidr'] = '0.0.0.0/0'
- emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
- os.environ['project_name'])
- emr_conf['vpc_id'] = os.environ['aws_vpc_id']
- emr_conf['cluster_id'] = get_emr_id_by_name(emr_conf['cluster_name'])
- emr_conf['cluster_instances'] = get_emr_instances_list(emr_conf['cluster_id'])
- emr_conf['cluster_master_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
- emr_conf['cluster_core_instances'] = get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
- emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
- emr_conf['project_name'], emr_conf['endpoint_name'])
- emr_conf['edge_instance_hostname'] = get_instance_private_ip_address(emr_conf['tag_name'],
- emr_conf['edge_instance_name'])
- if emr_conf['network_type'] == 'private':
- emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
- emr_conf['edge_instance_name']).get('Private')
- else:
- emr_conf['edge_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'],
- emr_conf['edge_instance_name']).get('Public')
- emr_conf['user_keyname'] = os.environ['project_name']
- emr_conf['os_user'] = os.environ['conf_os_user']
- emr_conf['initial_user'] = 'ec2-user'
- emr_conf['sudo_group'] = 'wheel'
+ dlab.actions_lib.create_aws_config_files()
+ print('Generating infrastructure names and tags')
+ emr_conf = dict()
+ if 'exploratory_name' in os.environ:
+ emr_conf['exploratory_name'] = os.environ['exploratory_name']
+ else:
+ emr_conf['exploratory_name'] = ''
+ if 'computational_name' in os.environ:
+ emr_conf['computational_name'] = os.environ['computational_name']
+ else:
+ emr_conf['computational_name'] = ''
+ emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+ emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+ emr_conf['project_name'] = os.environ['project_name']
+ emr_conf['endpoint_name'] = os.environ['endpoint_name']
+ emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
+ emr_conf['key_name'] = os.environ['conf_key_name']
+ emr_conf['region'] = os.environ['aws_region']
+ emr_conf['release_label'] = os.environ['emr_version']
+ emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+ emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+ emr_conf['instance_count'] = os.environ['emr_instance_count']
+ emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+ emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+ emr_conf['network_type'] = os.environ['conf_network_type']
+ emr_conf['role_service_name'] = os.environ['emr_service_role']
+ emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
+ emr_conf['tags'] = "Name={0}-{1}-{2}-des-{3}-{4}," \
+ "{0}-tag={0}-{1}-{2}-des-{3}-{4}," \
+ "Notebook={5}," \
+ "State=not-configured," \
+ "Endpoint_tag={2}".format(
+ emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'],
+ emr_conf['computational_name'], args.uuid, os.environ['notebook_instance_name'])
+ emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}' \
+ .format(emr_conf['service_base_name'],
+ emr_conf['project_name'],
+ emr_conf['endpoint_name'],
+ emr_conf['computational_name'],
+ args.uuid)
+ emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+ emr_conf['endpoint_name']).lower().replace('_', '-')
+ tag = {"Key": "{}-tag".format(emr_conf['service_base_name']), "Value": "{}-{}-{}-subnet".format(
+ emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])}
+ emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ emr_conf['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'],
+ os.environ['conf_key_name'])
+ emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+ emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'.format(emr_conf['service_base_name'],
+ emr_conf['project_name'],
+ emr_conf['endpoint_name'])
+ emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+ emr_conf['cluster_id'] = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+ emr_conf['cluster_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'])
+ emr_conf['cluster_master_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'MASTER')
+ emr_conf['cluster_core_instances'] = dlab.meta_lib.get_emr_instances_list(emr_conf['cluster_id'], 'CORE')
+ emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+ emr_conf['project_name'], emr_conf['endpoint_name'])
+ emr_conf['edge_instance_hostname'] = dlab.meta_lib.get_instance_private_ip_address(
+ emr_conf['tag_name'], emr_conf['edge_instance_name'])
+ emr_conf['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(emr_conf['tag_name'],
+ emr_conf['edge_instance_name'])
+ emr_conf['user_keyname'] = emr_conf['project_name']
+ emr_conf['os_user'] = os.environ['conf_os_user']
+ emr_conf['initial_user'] = 'ec2-user'
+ emr_conf['sudo_group'] = 'wheel'
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ if 'cluster_id' in emr_conf: dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+ sys.exit(1)
try:
jobs = []
@@ -252,14 +253,14 @@
logging.info('[SUMMARY]')
ip_address = emr_conf['cluster_master_instances'][0].get('PrivateIpAddress')
emr_master_url = "http://" + ip_address + ":8088"
- emr_master_acces_url = "https://" + emr_conf['edge_instance_ip'] + "/{}/".format(emr_conf['exploratory_name'] +
- '_' +
- emr_conf['computational_name'])
+ emr_master_acces_url = "https://{}/{}_{}/".format(emr_conf['edge_instance_hostname'],
+ emr_conf['exploratory_name'],
+ emr_conf['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(emr_conf['service_base_name']))
print("Cluster name: {}".format(emr_conf['cluster_name']))
- print("Cluster id: {}".format(get_emr_id_by_name(emr_conf['cluster_name'])))
+ print("Cluster id: {}".format(dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])))
print("Key name: {}".format(emr_conf['key_name']))
print("Region: {}".format(emr_conf['region']))
print("EMR version: {}".format(emr_conf['release_label']))
@@ -270,7 +271,7 @@
print("Bucket name: {}".format(emr_conf['bucket_name']))
with open("/root/result.json", 'w') as result:
res = {"hostname": emr_conf['cluster_name'],
- "instance_id": get_emr_id_by_name(emr_conf['cluster_name']),
+ "instance_id": dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name']),
"key_name": emr_conf['key_name'],
"user_own_bucket_name": emr_conf['bucket_name'],
"Action": "Create new EMR cluster",
@@ -282,8 +283,7 @@
]}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ dlab.actions_lib.terminate_emr(emr_conf['cluster_id'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
index 0f1f510..7dd94d9 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import argparse
import sys
import os
@@ -46,119 +47,111 @@
level=logging.INFO,
filename=local_log_filepath)
try:
- os.environ['exploratory_name']
- except:
- os.environ['exploratory_name'] = ''
- if os.path.exists('/response/.emr_creating_{}'.format(os.environ['exploratory_name'])):
- time.sleep(30)
- create_aws_config_files()
- emr_conf = dict()
- emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- emr_conf['project_name'] = os.environ['project_name']
- emr_conf['endpoint_name'] = os.environ['endpoint_name']
- edge_status = get_instance_status(emr_conf['service_base_name'] + '-Tag', '{0}-{1}-{2}-edge'
- .format(emr_conf['service_base_name'],
- emr_conf['project_name'],
- emr_conf['endpoint_name']))
- if edge_status != 'running':
- logging.info('ERROR: Edge node is unavailable! Aborting...')
- print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = get_instance_hostname(
- emr_conf['service_base_name'] + '-Tag',
- emr_conf['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable',
- os.environ['ssn_dlab_path'],
- os.environ['conf_os_user'], ssn_hostname)
- append_result("Edge node is unavailable")
+ emr_conf = dict()
+ if 'exploratory_name' in os.environ:
+ emr_conf['exploratory_name'] = os.environ['exploratory_name']
+ else:
+ emr_conf['exploratory_name'] = ''
+ if os.path.exists('/response/.emr_creating_{}'.format(emr_conf['exploratory_name'])):
+ time.sleep(30)
+ dlab.actions_lib.create_aws_config_files()
+ emr_conf['service_base_name'] = os.environ['conf_service_base_name']
+ emr_conf['project_name'] = os.environ['project_name']
+ emr_conf['endpoint_name'] = os.environ['endpoint_name']
+ edge_status = dlab.meta_lib.get_instance_status(
+ '{}-tag'.format(emr_conf['service_base_name']),
+ '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+ emr_conf['endpoint_name']))
+ if edge_status != 'running':
+ logging.info('ERROR: Edge node is unavailable! Aborting...')
+ print('ERROR: Edge node is unavailable! Aborting...')
+ ssn_hostname = dlab.meta_lib.get_instance_hostname(
+ emr_conf['service_base_name'] + '-tag',
+ emr_conf['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable',
+ os.environ['ssn_dlab_path'],
+ os.environ['conf_os_user'], ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
+ sys.exit(1)
+ print('Generating infrastructure names and tags')
+ if 'computational_name' in os.environ:
+ emr_conf['computational_name'] = os.environ['computational_name']
+ else:
+ emr_conf['computational_name'] = ''
+ emr_conf['apps'] = 'Hadoop Hive Hue Spark'
+ emr_conf['tag_name'] = '{0}-tag'.format(emr_conf['service_base_name'])
+ emr_conf['key_name'] = os.environ['conf_key_name']
+ emr_conf['endpoint_tag'] = emr_conf['endpoint_name']
+ emr_conf['project_tag'] = emr_conf['project_name']
+ emr_conf['region'] = os.environ['aws_region']
+ emr_conf['release_label'] = os.environ['emr_version']
+ emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
+ emr_conf['project_name'], emr_conf['endpoint_name'])
+ emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
+ emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
+ emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
+ emr_conf['instance_count'] = os.environ['emr_instance_count']
+ emr_conf['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+ emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
+ emr_conf['role_service_name'] = os.environ['emr_service_role']
+ emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
+ emr_conf['tags'] = 'Name={0}-{1}-{5}-des-{3},' \
+ '{0}-tag={0}-{1}-{5}-des-{3},' \
+ 'Notebook={4},' \
+ 'State=not-configured,' \
+ 'ComputationalName={3},' \
+ 'Endpoint_tag={5}'\
+ .format(emr_conf['service_base_name'],
+ emr_conf['project_name'],
+ emr_conf['exploratory_name'],
+ emr_conf['computational_name'],
+ os.environ['notebook_instance_name'],
+ emr_conf['endpoint_name'])
+ emr_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}-{4}'\
+ .format(emr_conf['service_base_name'],
+ emr_conf['project_name'],
+ emr_conf['endpoint_name'],
+ emr_conf['computational_name'],
+ args.uuid)
+ emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+ emr_conf['endpoint_name']).lower().replace('_', '-')
+ emr_conf['configurations'] = '[]'
+ if 'emr_configurations' in os.environ:
+ emr_conf['configurations'] = os.environ['emr_configurations']
+
+ tag = {"Key": "{}-tag".format(emr_conf['service_base_name']),
+ "Value": "{}-{}-{}-subnet".format(emr_conf['service_base_name'], emr_conf['project_name'],
+ emr_conf['endpoint_name'])}
+ emr_conf['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ emr_conf['all_ip_cidr'] = '0.0.0.0/0'
+ emr_conf['additional_emr_sg_name'] = '{}-{}-{}-de-se-additional-sg'\
+ .format(emr_conf['service_base_name'], emr_conf['project_name'], emr_conf['endpoint_name'])
+ emr_conf['vpc_id'] = os.environ['aws_vpc_id']
+ emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
+ emr_conf['provision_instance_ip'] = None
+ try:
+ emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+ emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
+ emr_conf['endpoint_name'])).get('Private') + "/32"
+ except:
+ emr_conf['provision_instance_ip'] = dlab.meta_lib.get_instance_ip_address(
+ emr_conf['tag_name'], '{0}-ssn'.format(emr_conf['service_base_name'])).get('Private') + "/32"
+ if os.environ['emr_slave_instance_spot'] == 'True':
+ ondemand_price = float(dlab.meta_lib.get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
+ emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
+ else:
+ emr_conf['slave_bid_price'] = 0
+ if 'emr_timeout' in os.environ:
+ emr_conf['emr_timeout'] = os.environ['emr_timeout']
+ else:
+ emr_conf['emr_timeout'] = "1200"
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
sys.exit(1)
- print('Generating infrastructure names and tags')
- try:
- emr_conf['exploratory_name'] = os.environ['exploratory_name']
- except:
- emr_conf['exploratory_name'] = ''
- try:
- emr_conf['computational_name'] = os.environ['computational_name']
- except:
- emr_conf['computational_name'] = ''
- emr_conf['apps'] = 'Hadoop Hive Hue Spark'
- emr_conf['tag_name'] = '{0}-Tag'.format(emr_conf['service_base_name'])
- emr_conf['key_name'] = os.environ['conf_key_name']
- emr_conf['endpoint_tag'] = os.environ['endpoint_name']
- emr_conf['endpoint_name'] = os.environ['endpoint_name']
- emr_conf['project_tag'] = os.environ['project_name']
- emr_conf['region'] = os.environ['aws_region']
- emr_conf['release_label'] = os.environ['emr_version']
- emr_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(emr_conf['service_base_name'],
- os.environ['project_name'], emr_conf['endpoint_tag'])
- emr_conf['edge_security_group_name'] = '{0}-sg'.format(emr_conf['edge_instance_name'])
- emr_conf['master_instance_type'] = os.environ['emr_master_instance_type']
- emr_conf['slave_instance_type'] = os.environ['emr_slave_instance_type']
- emr_conf['instance_count'] = os.environ['emr_instance_count']
- emr_conf['notebook_ip'] = get_instance_ip_address(
- emr_conf['tag_name'], os.environ['notebook_instance_name']).get('Private')
- emr_conf['role_service_name'] = os.environ['emr_service_role']
- emr_conf['role_ec2_name'] = os.environ['emr_ec2_role']
- emr_conf['tags'] = 'Name={0}-{1}-des-{2}-{3},' \
- '{0}-Tag={0}-{1}-des-{2}-{3},' \
- 'Notebook={4},' \
- 'State=not-configured,' \
- 'ComputationalName={3},' \
- 'Endpoint_tag={5}'\
- .format(emr_conf['service_base_name'],
- os.environ['project_name'],
- emr_conf['exploratory_name'],
- emr_conf['computational_name'],
- os.environ['notebook_instance_name'],
- emr_conf['endpoint_name'])
- emr_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}-{4}'\
- .format(emr_conf['service_base_name'],
- os.environ['project_name'],
- emr_conf['exploratory_name'],
- emr_conf['computational_name'],
- args.uuid)
- emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
- emr_conf['endpoint_name'])).lower().replace('_', '-')
- emr_conf['configurations'] = '[]'
- if 'emr_configurations' in os.environ:
- emr_conf['configurations'] = os.environ['emr_configurations']
-
- tag = {"Key": "{}-Tag".format(emr_conf['service_base_name']),
- "Value": "{}-{}-subnet".format(emr_conf['service_base_name'],
- os.environ['project_name'])}
- emr_conf['subnet_cidr'] = get_subnet_by_tag(tag)
- emr_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- emr_conf['all_ip_cidr'] = '0.0.0.0/0'
- emr_conf['additional_emr_sg_name'] = '{}-{}-de-se-additional-sg'\
- .format(emr_conf['service_base_name'], os.environ['project_name'])
- emr_conf['vpc_id'] = os.environ['aws_vpc_id']
- emr_conf['vpc2_id'] = os.environ['aws_notebook_vpc_id']
- emr_conf['provision_instance_ip'] = None
- try:
- emr_conf['provision_instance_ip'] = get_instance_ip_address(
- emr_conf['tag_name'], '{0}-{1}-endpoint'.format(emr_conf['service_base_name'],
- os.environ['endpoint_name'])).get('Private') + "/32"
- except:
- emr_conf['provision_instance_ip'] = get_instance_ip_address(emr_conf['tag_name'], '{0}-ssn'.format(
- emr_conf['service_base_name'])).get('Private') + "/32"
- if os.environ['emr_slave_instance_spot'] == 'True':
- ondemand_price = float(get_ec2_price(emr_conf['slave_instance_type'], emr_conf['region']))
- emr_conf['slave_bid_price'] = (ondemand_price * int(os.environ['emr_slave_instance_spot_pct_price'])) / 100
- else:
- emr_conf['slave_bid_price'] = 0
-
- try:
- emr_conf['emr_timeout'] = os.environ['emr_timeout']
- except:
- emr_conf['emr_timeout'] = "1200"
-
- print("Will create exploratory environment with edge node "
- "as access point as following: {}".
- format(json.dumps(emr_conf,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ print("Will create exploratory environment with edge node as access point as following: {}".format(
+ json.dumps(emr_conf, sort_keys=True, indent=4, separators=(',', ': '))))
logging.info(json.dumps(emr_conf))
with open('/root/result.json', 'w') as f:
@@ -166,11 +159,11 @@
json.dump(data, f)
try:
- emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
- local('touch /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+ dlab.meta_lib.emr_waiter(emr_conf['tag_name'], os.environ['notebook_instance_name'])
+ local('touch /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
except Exception as err:
traceback.print_exc()
- append_result("EMR waiter fail.", str(err))
+ dlab.fab.append_result("EMR waiter fail.", str(err))
sys.exit(1)
with open('/root/result.json', 'w') as f:
@@ -180,8 +173,8 @@
logging.info('[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]')
print("[CREATING ADDITIONAL SECURITY GROUPS FOR EMR]")
try:
- edge_group_id = check_security_group(emr_conf['edge_security_group_name'])
- cluster_sg_ingress = format_sg([
+ edge_group_id = dlab.meta_lib.check_security_group(emr_conf['edge_security_group_name'])
+ cluster_sg_ingress = dlab.meta_lib.format_sg([
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -201,7 +194,7 @@
"PrefixListIds": []
}
])
- cluster_sg_egress = format_sg([
+ cluster_sg_egress = dlab.meta_lib.format_sg([
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": emr_conf['subnet_cidr']}],
@@ -245,18 +238,18 @@
emr_conf['cluster_name'], True)
try:
if 'conf_additional_tags' in os.environ:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- emr_conf['project_tag'], emr_conf['endpoint_tag'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ emr_conf['project_tag'], emr_conf['endpoint_tag'], os.environ['conf_additional_tags'])
else:
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'], emr_conf['endpoint_tag'])
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(emr_conf['project_tag'],
+ emr_conf['endpoint_tag'])
print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
local("~/scripts/{}.py {}".format('common_create_security_group', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create sg.", str(err))
+ dlab.fab.append_result("Failed to create sg.", str(err))
sys.exit(1)
local("echo Waiting for changes to propagate; sleep 10")
@@ -304,7 +297,7 @@
emr_conf['region'],
emr_conf['tags'],
os.environ['conf_key_dir'],
- os.environ['project_name'],
+ emr_conf['project_name'],
os.environ['emr_slave_instance_spot'],
str(emr_conf['slave_bid_price']),
emr_conf['service_base_name'],
@@ -315,14 +308,12 @@
except:
traceback.print_exc()
raise Exception
-
cluster_name = emr_conf['cluster_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], emr_conf['key_name'])
- local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
+ local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create EMR Cluster.", str(err))
- local('rm /response/.emr_creating_{}'.format(os.environ['exploratory_name']))
- emr_id = get_emr_id_by_name(emr_conf['cluster_name'])
- terminate_emr(emr_id)
+ dlab.fab.append_result("Failed to create EMR Cluster.", str(err))
+ local('rm /response/.emr_creating_{}'.format(emr_conf['exploratory_name']))
+ emr_id = dlab.meta_lib.get_emr_id_by_name(emr_conf['cluster_name'])
+ dlab.actions_lib.terminate_emr(emr_id)
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
index 05f2e1f..e9551e3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine-service_terminate.py
@@ -21,18 +21,22 @@
#
# ******************************************************************************
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import boto3
+import logging
import argparse
import sys
import os
+import traceback
+import json
def terminate_emr_cluster(emr_name, bucket_name, tag_name, nb_tag_value, ssh_user, key_path):
print('Terminating EMR cluster and cleaning EMR config from S3 bucket')
try:
- clusters_list = get_emr_list(emr_name, 'Value')
+ clusters_list = dlab.meta_lib.get_emr_list(emr_name, 'Value')
if clusters_list:
for cluster_id in clusters_list:
computational_name = ''
@@ -44,13 +48,13 @@
for tag in cluster.get('Tags'):
if tag.get('Key') == 'ComputationalName':
computational_name = tag.get('Value')
- s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
+ dlab.actions_lib.s3_cleanup(bucket_name, emr_name, os.environ['project_name'])
print("The bucket {} has been cleaned successfully".format(bucket_name))
- terminate_emr(cluster_id)
+ dlab.actions_lib.terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
print("Removing EMR kernels from notebook")
- remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
- emr_version, computational_name)
+ dlab.actions_lib.remove_kernels(emr_name, tag_name, nb_tag_value, ssh_user, key_path,
+ emr_version, computational_name)
else:
print("There are no EMR clusters to terminate.")
except:
@@ -66,19 +70,18 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
emr_conf = dict()
- emr_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ emr_conf['service_base_name'] = os.environ['conf_service_base_name']
emr_conf['emr_name'] = os.environ['emr_cluster_name']
emr_conf['notebook_name'] = os.environ['notebook_instance_name']
emr_conf['project_name'] = os.environ['project_name']
emr_conf['endpoint_name'] = os.environ['endpoint_name']
- emr_conf['bucket_name'] = ('{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
- emr_conf['endpoint_name'])).lower().replace('_', '-')
+ emr_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(emr_conf['service_base_name'], emr_conf['project_name'],
+ emr_conf['endpoint_name']).lower().replace('_', '-')
emr_conf['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
- emr_conf['tag_name'] = emr_conf['service_base_name'] + '-Tag'
+ emr_conf['tag_name'] = emr_conf['service_base_name'] + '-tag'
try:
logging.info('[TERMINATE EMR CLUSTER]')
@@ -88,7 +91,7 @@
emr_conf['notebook_name'], os.environ['conf_os_user'], emr_conf['key_path'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate EMR cluster.", str(err))
+ dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
raise Exception
except:
sys.exit(1)
@@ -101,6 +104,6 @@
"Action": "Terminate EMR cluster"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
index 224b0dc..e0a4f0c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -37,13 +38,13 @@
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
- slave_hostname = get_instance_private_ip_address(data_engine['tag_name'], slave_name)
+ slave_hostname = dlab.meta_lib.get_instance_private_ip_address(data_engine['tag_name'], slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
- (slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
- data_engine['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ slave_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+ data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -51,12 +52,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to create ssh user on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
@@ -70,12 +67,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to clean slave instance.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to clean slave instance.", str(err))
sys.exit(1)
try:
@@ -91,12 +84,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to configure proxy on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
@@ -111,18 +100,15 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to install prerequisites on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave + 1))
print('[CONFIGURE SLAVE NODE {}]'.format(slave + 1))
- params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
+ params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+ "--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -133,12 +119,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to configure slave node.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
try:
@@ -153,15 +135,18 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed install users key on slave node.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed install users key on slave node.", str(err))
sys.exit(1)
+def clear_resources():
+ dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+ for i in range(data_engine['instance_count'] - 1):
+ slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+ dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
+
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
@@ -173,81 +158,77 @@
try:
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
+ if 'exploratory_name' in os.environ:
data_engine['exploratory_name'] = os.environ['exploratory_name']
- except:
+ else:
data_engine['exploratory_name'] = ''
- try:
+ if 'computational_name' in os.environ:
data_engine['computational_name'] = os.environ['computational_name']
- except:
+ else:
data_engine['computational_name'] = ''
- data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['region'] = os.environ['aws_region']
data_engine['network_type'] = os.environ['conf_network_type']
- data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
- '-de-' + data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
- data_engine['dataengine_master_security_group_name'] = data_engine['service_base_name'] + '-' + \
- os.environ['project_name'] + '-dataengine-master-sg'
- data_engine['dataengine_slave_security_group_name'] = data_engine['service_base_name'] + '-' + \
- os.environ['project_name'] + '-dataengine-slave-sg'
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
tag = {"Key": data_engine['tag_name'],
- "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
- data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
- data_engine['notebook_dataengine_role_profile_name'] = data_engine['service_base_name']. \
- lower().replace('-', '_') + "-" + \
- os.environ['project_name'] + "-" + os.environ['endpoint_name'] + '-nb-de-Profile'
+ "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+ data_engine['endpoint_name'])}
+ data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
- master_node_hostname = get_instance_hostname(data_engine['tag_name'], data_engine['master_node_name'])
+ master_node_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'],
+ data_engine['master_node_name'])
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
- data_engine['user_keyname'] = os.environ['project_name']
+ data_engine['user_keyname'] = data_engine['project_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- data_engine['project_name'] = os.environ['project_name']
- data_engine['endpoint_name'] = os.environ['endpoint_name']
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'], data_engine['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(data_engine['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
- if data_engine['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(data_engine['tag_name'], edge_instance_name).get('Public')
-
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+ edge_instance_name).get('Private')
+ data_engine['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(data_engine['tag_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ data_engine['initial_user'] = 'ubuntu'
+ data_engine['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ data_engine['initial_user'] = 'ec2-user'
+ data_engine['sudo_group'] = 'wheel'
except Exception as err:
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
- data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
- '-de-' + data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(int(os.environ['dataengine_instance_count']) - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER ON MASTER NODE]')
print('[CREATING DLAB SSH USER ON MASTER NODE]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
- data_engine['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
+ data_engine['initial_user'], data_engine['dlab_ssh_user'], data_engine['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -255,12 +236,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to create ssh user on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
@@ -274,12 +251,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to clean master instance.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to clean master instance.", str(err))
sys.exit(1)
try:
@@ -295,12 +268,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to configure proxy on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
@@ -315,12 +284,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed to install prerequisites on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
@@ -335,18 +300,15 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
- append_result("Failed install users key on master node.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed install users key on master node.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
- params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} --scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
+ params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
+ "--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['dlab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
@@ -357,12 +319,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure master node", str(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
+ dlab.fab.append_result("Failed to configure master node", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -377,17 +335,15 @@
if job.exitcode != 0:
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
+ dlab.fab.append_result("Failed to configure slave nodes.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
- notebook_instance_ip = get_instance_private_ip_address('Name', os.environ['notebook_instance_name'])
+ notebook_instance_ip = dlab.meta_lib.get_instance_private_ip_address('Name',
+ os.environ['notebook_instance_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
@@ -412,22 +368,20 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- remove_ec2(data_engine['tag_name'], slave_name)
+ dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+ clear_resources()
sys.exit(1)
try:
- ip_address = get_instance_ip_address(data_engine['tag_name'],
- data_engine['master_node_name']).get('Private')
+ ip_address = dlab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
+ data_engine['master_node_name']).get('Private')
spark_master_url = "http://" + ip_address + ":8080"
- spark_master_access_url = "https://" + edge_instance_ip + "/{}/".format(data_engine['exploratory_name'] +
- '_' + data_engine['computational_name'])
+ spark_master_access_url = "https://{}/{}_{}/".format(data_engine['edge_instance_hostname'],
+ data_engine['exploratory_name'],
+ data_engine['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
@@ -438,7 +392,8 @@
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
- "instance_id": get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name']),
+ "instance_id": dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+ data_engine['master_node_name']),
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine",
"computational_url": [
@@ -449,6 +404,7 @@
]}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
index 62b6a95..ad19f7a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_prepare.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -43,84 +44,85 @@
level=logging.INFO,
filename=local_log_filepath)
try:
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
data_engine = dict()
- data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- edge_status = get_instance_status(data_engine['service_base_name'] + '-Tag',
- '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
- os.environ['project_name'],
- os.environ['endpoint_name']))
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ edge_status = dlab.meta_lib.get_instance_status(
+ data_engine['service_base_name'] + '-tag', '{0}-{1}-{2}-edge'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name']))
if edge_status != 'running':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = get_instance_hostname(data_engine['service_base_name'] + '-Tag',
- data_engine['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
- ssn_hostname)
- append_result("Edge node is unavailable")
+ ssn_hostname = dlab.meta_lib.get_instance_hostname(data_engine['service_base_name'] + '-tag',
+ data_engine['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
sys.exit(1)
print('Generating infrastructure names and tags')
-
- try:
+ if 'exploratory_name' in os.environ:
data_engine['exploratory_name'] = os.environ['exploratory_name']
- except:
+ else:
data_engine['exploratory_name'] = ''
- try:
+ if 'computational_name' in os.environ:
data_engine['computational_name'] = os.environ['computational_name']
- except:
+ else:
data_engine['computational_name'] = ''
-
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['region'] = os.environ['aws_region']
- data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + os.environ['project_name'] + \
- '-de-' + data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
- data_engine['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
- .format(data_engine['service_base_name'], os.environ['project_name'])
- data_engine['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
- .format(data_engine['service_base_name'], os.environ['project_name'])
- data_engine['tag_name'] = '{}-Tag'.format(data_engine['service_base_name'])
+ data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['tag_name'] = '{}-tag'.format(data_engine['service_base_name'])
tag = {"Key": data_engine['tag_name'],
- "Value": "{}-{}-subnet".format(data_engine['service_base_name'], os.environ['project_name'])}
- data_engine['subnet_cidr'] = get_subnet_by_tag(tag)
- data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(data_engine['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
+ "Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
+ data_engine['endpoint_name'])}
+ data_engine['subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
+ .format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['cluster_nodes_tag'] = {"Key": "dataengine_notebook_name",
"Value": os.environ['notebook_instance_name']}
data_engine['cluster_nodes_resource_tag'] = {"Key": os.environ['conf_tag_resource_id'],
- "Value": data_engine['service_base_name'] + ':' +
- data_engine['cluster_name']}
+ "Value": "{}:{}".format(data_engine['service_base_name'],
+ data_engine['cluster_name'])}
data_engine['cluster_nodes_billing_tag'] = {"Key": os.environ['conf_billing_tag_key'],
- "Value": os.environ['conf_billing_tag_value']}
+ "Value": os.environ['conf_billing_tag_value']}
data_engine['primary_disk_size'] = '30'
data_engine['instance_class'] = 'dataengine'
if os.environ['conf_shared_image_enabled'] == 'false':
- data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
+ data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+ os.environ['application'])
else:
data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
+ data_engine['endpoint_name'],
+ os.environ['application'])
data_engine['notebook_image_name'] = (
- lambda x: '{0}-{1}-{2}-{3}'.format(data_engine['service_base_name'],
- os.environ['project_name'],
- os.environ['application'],
- os.environ['notebook_image_name'].lower().replace('_', '-')) if (
+ lambda x: '{0}-{1}-{4}-{2}-{3}'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ os.environ['application'],
+ os.environ['notebook_image_name'],
+ data_engine['endpoint_name']) if (
x != 'None' and x != '')
else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
print('Searching pre-configured images')
- data_engine['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
- image_id = get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
+ data_engine['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+ os.environ['conf_os_family'])])
+ image_id = dlab.meta_lib.get_ami_id_by_name(data_engine['notebook_image_name'], 'available')
if image_id != '' and os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
data_engine['ami_id'] = image_id
print('Pre-configured image found. Using: {}'.format(data_engine['ami_id']))
@@ -129,8 +131,7 @@
print('No pre-configured image found. Using default one: {}'.format(data_engine['ami_id']))
except Exception as err:
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary. Exception:" + str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
with open('/root/result.json', 'w') as f:
@@ -138,35 +139,39 @@
json.dump(data, f)
try:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ data_engine['project_name'], data_engine['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(data_engine['project_name'],
+ data_engine['endpoint_name'])
print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
try:
logging.info('[CREATE MASTER NODE]')
print('[CREATE MASTER NODE]')
data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "master"}
- params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+ params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+ "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} " \
+ "--instance_class {}" \
.format(data_engine['master_node_name'], data_engine['ami_id'], data_engine['master_size'],
data_engine['key_name'],
- get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
- get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+ dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_master_security_group_name']),
+ dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
data_engine['master_node_name'], data_engine['primary_disk_size'], data_engine['instance_class'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
- data_engine['master_id'] = get_instance_by_name(data_engine['tag_name'], data_engine['master_node_name'])
- create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
- create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
- create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
- create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
+ data_engine['master_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'],
+ data_engine['master_node_name'])
+ dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_resource_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_billing_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['master_id'], data_engine['cluster_nodes_tag_type'], False)
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create master instance.", str(err))
+ dlab.fab.append_result("Failed to create master instance.", str(err))
sys.exit(1)
try:
@@ -175,31 +180,32 @@
print('[CREATE SLAVE NODE {}]'.format(i + 1))
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
data_engine['cluster_nodes_tag_type'] = {"Key": "Type", "Value": "slave"}
- params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} --subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} --primary_disk_size {} --instance_class {}" \
+ params = "--node_name {} --ami_id {} --instance_type {} --key_name {} --security_group_ids {} " \
+ "--subnet_id {} --iam_profile {} --infra_tag_name {} --infra_tag_value {} " \
+ "--primary_disk_size {} --instance_class {}" \
.format(slave_name, data_engine['ami_id'], data_engine['slave_size'],
data_engine['key_name'],
- get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
- get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
+ dlab.meta_lib.get_security_group_by_name(data_engine['dataengine_slave_security_group_name']),
+ dlab.meta_lib.get_subnet_by_cidr(data_engine['subnet_cidr'], os.environ['aws_notebook_vpc_id']),
data_engine['notebook_dataengine_role_profile_name'], data_engine['tag_name'],
slave_name, data_engine['primary_disk_size'], data_engine['instance_class'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
- data_engine['slave_id'] = get_instance_by_name(data_engine['tag_name'], slave_name)
- create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
- create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
- create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
- create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
+ data_engine['slave_id'] = dlab.meta_lib.get_instance_by_name(data_engine['tag_name'], slave_name)
+ dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_resource_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_billing_tag'], False)
+ dlab.actions_lib.create_tag(data_engine['slave_id'], data_engine['cluster_nodes_tag_type'], False)
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
+ dlab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
try:
- remove_ec2(data_engine['tag_name'], slave_name)
+ dlab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
except:
print("The slave instance {} hasn't been created.".format(slave_name))
- append_result("Failed to create slave instances.", str(err))
+ dlab.fab.append_result("Failed to create slave instances.", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
index 2e299d4..0450ff7 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_start.py
@@ -24,14 +24,18 @@
import logging
import json
import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
+import traceback
+from fabric.api import *
def start_data_engine(cluster_name):
print("Start Data Engine")
try:
- start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+ dlab.actions_lib.start_ec2(os.environ['conf_tag_resource_id'], cluster_name)
except:
sys.exit(1)
@@ -47,7 +51,7 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
data_engine = dict()
@@ -59,15 +63,14 @@
data_engine['computational_name'] = os.environ['computational_name']
except:
data_engine['computational_name'] = ''
- data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
data_engine['project_name'] = os.environ['project_name']
- data_engine['cluster_name'] = \
- data_engine['service_base_name'] + '-' + \
- data_engine['project_name'] + '-de-' + \
- data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
logging.info('[START DATA ENGINE CLUSTER]')
print('[START DATA ENGINE CLUSTER]')
@@ -76,19 +79,18 @@
data_engine['cluster_name']))
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to start Data Engine.", str(err))
+ dlab.fab.append_result("Failed to start Data Engine.", str(err))
sys.exit(1)
try:
logging.info('[UPDATE LAST ACTIVITY TIME]')
print('[UPDATE LAST ACTIVITY TIME]')
data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
- data_engine['notebook_ip'] = get_instance_ip_address(data_engine['tag_name'],
- os.environ['notebook_instance_name']).get('Private')
- data_engine['computational_ip'] = get_instance_ip_address(data_engine['tag_name'],
- data_engine['computational_id']).get(
- 'Private')
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+ data_engine['notebook_ip'] = dlab.meta_lib.get_instance_ip_address(
+ data_engine['tag_name'], os.environ['notebook_instance_name']).get('Private')
+ data_engine['computational_ip'] = dlab.meta_lib.get_instance_ip_address(
+ data_engine['tag_name'], data_engine['computational_id']).get('Private')
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
.format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -97,7 +99,7 @@
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
raise Exception
except:
sys.exit(1)
@@ -108,6 +110,6 @@
"Action": "Start Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
index 3cb0d3b..d31d395 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_stop.py
@@ -24,14 +24,15 @@
import logging
import json
import os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
import sys
def stop_data_engine(cluster_name):
print("Stop Data Engine")
try:
- stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+ dlab.actions_lib.stop_ec2(os.environ['conf_tag_resource_id'], cluster_name)
except:
sys.exit(1)
@@ -47,7 +48,7 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
data_engine_config = dict()
try:
@@ -58,14 +59,13 @@
data_engine_config['computational_name'] = os.environ['computational_name']
except:
data_engine_config['computational_name'] = ''
- data_engine_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ data_engine_config['service_base_name'] = (os.environ['conf_service_base_name'])
data_engine_config['project_name'] = os.environ['project_name']
- data_engine_config['cluster_name'] = \
- data_engine_config['service_base_name'] + '-' \
- + data_engine_config['project_name'] + '-de-' + \
- data_engine_config['exploratory_name'] + '-' \
- + data_engine_config['computational_name']
+ data_engine_config['endpoint_name'] = os.environ['endpoint_name']
+ data_engine_config['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine_config['service_base_name'],
+ data_engine_config['project_name'],
+ data_engine_config['endpoint_name'],
+ data_engine_config['computational_name'])
logging.info('[STOP DATA ENGINE CLUSTER]')
print('[STOP DATA ENGINE CLUSTER]')
@@ -74,7 +74,7 @@
data_engine_config['cluster_name']))
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to stop Data Engine.", str(err))
+ dlab.fab.append_result("Failed to stop Data Engine.", str(err))
sys.exit(1)
try:
@@ -83,6 +83,6 @@
"Action": "Stop Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
index a036f74..7d8c10d 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/dataengine_terminate.py
@@ -24,8 +24,9 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import traceback
import os
@@ -34,14 +35,14 @@
cluster_name, remote_kernel_name):
print("Terminating data engine cluster")
try:
- remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
+ dlab.actions_lib.remove_ec2(os.environ['conf_tag_resource_id'], cluster_name)
except:
sys.exit(1)
print("Removing Data Engine kernels from notebook")
try:
- remove_dataengine_kernels(tag_name, notebook_name,
- os_user, key_path, remote_kernel_name)
+ dlab.actions_lib.remove_dataengine_kernels(tag_name, notebook_name,
+ os_user, key_path, remote_kernel_name)
except:
sys.exit(1)
@@ -57,7 +58,7 @@
filename=local_log_filepath)
# generating variables dictionary
print('Generating infrastructure names and tags')
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
data_engine = dict()
try:
@@ -68,18 +69,16 @@
data_engine['computational_name'] = os.environ['computational_name']
except:
data_engine['computational_name'] = ''
- data_engine['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['project_name'] = os.environ['project_name']
- data_engine['cluster_name'] = \
- data_engine['service_base_name'] + '-' + \
- data_engine['project_name'] + '-de-' + \
- data_engine['exploratory_name'] + '-' +\
- data_engine['computational_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
- data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + \
- os.environ['conf_key_name'] + '.pem'
+ data_engine['key_path'] = "{}/{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
logging.info('[TERMINATE DATA ENGINE]')
@@ -93,7 +92,7 @@
data_engine['cluster_name']), data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate Data Engine.", str(err))
+ dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -104,6 +103,6 @@
"Action": "Terminate Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
index 7d504d8..a2ca856 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/deeplearning_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -42,69 +44,77 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -123,16 +132,16 @@
print('[CONFIGURE PROXY ON DEEP LEARNING INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
- .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -145,12 +154,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -166,9 +174,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -178,7 +185,7 @@
"--os_user {2} --jupyter_version {3} " \
"--scala_version {4} --spark_version {5} " \
"--hadoop_version {6} --region {7} " \
- "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+ "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -190,9 +197,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure Deep Learning node.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -203,12 +209,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -224,9 +229,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -251,99 +255,107 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['conf_additional_tags'])
except KeyError:
os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
- os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], notebook_config['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
- os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ notebook_config['endpoint_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ tensor_board_url = 'http://' + ip_address + ':6006'
+ jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("AMI name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- tensor_board_url = 'http://' + ip_address + ':6006'
- jupyter_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("AMI name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'],notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'],notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensor_board_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url},
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensor_board_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url},
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
index 4d414b1..d96ef49 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_configure.py
@@ -22,10 +22,17 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,81 +42,103 @@
level=logging.DEBUG,
filename=local_log_filepath)
- print('Generating infrastructure names and tags')
- edge_conf = dict()
- edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- edge_conf['key_name'] = os.environ['conf_key_name']
- edge_conf['user_key'] = os.environ['key']
- edge_conf['project_name'] = os.environ['project_name']
- edge_conf['endpoint_name'] = os.environ['endpoint_name']
- edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
- edge_conf['endpoint_name'])
- edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
- edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
- edge_conf['project_name'],
- edge_conf['endpoint_name']).lower().replace('_', '-')
- edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
- edge_conf['endpoint_name']).lower().replace('_', '-')
- edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
- edge_conf['notebook_instance_name'] = '{}-{}-nb'.format(edge_conf['service_base_name'],
- os.environ['project_name'])
- edge_conf['notebook_role_profile_name'] = '{}-{}-{}-nb-Profile' \
- .format(edge_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
- .format(edge_conf['service_base_name'], os.environ['project_name'])
- tag = {"Key": edge_conf['tag_name'],
- "Value": "{}-{}-subnet".format(edge_conf['service_base_name'], os.environ['project_name'])}
- edge_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
- edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
- edge_conf['network_type'] = os.environ['conf_network_type']
- if edge_conf['network_type'] == 'public':
- edge_conf['edge_public_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
- 'Public')
- edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
- 'Private')
- elif edge_conf['network_type'] == 'private':
- edge_conf['edge_private_ip'] = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name']).get(
- 'Private')
- edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
- edge_conf['vpc1_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
+ def clear_resources():
+ dlab.actions_lib.remove_all_iam_resources('notebook', edge_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', edge_conf['project_name'])
+ dlab.actions_lib.remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+ dlab.actions_lib.remove_sgroups(edge_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(edge_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(edge_conf['instance_name'])
+ dlab.actions_lib.remove_s3('edge', edge_conf['project_name'])
+
try:
- edge_conf['vpc2_cidrs'] = get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
- edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
- except KeyError:
- edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
-
- edge_conf['allowed_ip_cidr'] = list()
- for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
- edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
-
- instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
-
- if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} '.format(edge_conf['edge_private_ip'])
+ print('Generating infrastructure names and tags')
+ edge_conf = dict()
+ edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ edge_conf['key_name'] = os.environ['conf_key_name']
+ edge_conf['user_key'] = os.environ['key']
+ edge_conf['project_name'] = os.environ['project_name']
+ edge_conf['endpoint_name'] = os.environ['endpoint_name']
+ edge_conf['instance_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
+ edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name']).replace('_', '-').lower()
+ edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['endpoint_name']
+ ).replace('_', '-').lower()
+ edge_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['notebook_role_profile_name'] = '{}-{}-{}-nb-profile'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ tag = {"Key": edge_conf['tag_name'],
+ "Value": "{}-{}-{}-subnet".format(edge_conf['service_base_name'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])}
+ edge_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ edge_conf['network_type'] = os.environ['conf_network_type']
if edge_conf['network_type'] == 'public':
- step_cert_sans += ' --san {0} --san {1}'.format(
- get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name']),
- edge_conf['edge_public_ip'])
- else:
- step_cert_sans = ''
+ edge_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(edge_conf['tag_name'],
+ edge_conf['instance_name']).get('Public')
+ edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+ edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+ elif edge_conf['network_type'] == 'private':
+ edge_conf['edge_private_ip'] = dlab.meta_lib.get_instance_ip_address(
+ edge_conf['tag_name'], edge_conf['instance_name']).get('Private')
+ edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+ edge_conf['vpc1_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id'])
+ try:
+ edge_conf['vpc2_cidrs'] = dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_notebook_vpc_id'])
+ edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs'] + edge_conf['vpc2_cidrs']))
+ except KeyError:
+ edge_conf['vpc_cidrs'] = list(set(edge_conf['vpc1_cidrs']))
+
+ edge_conf['allowed_ip_cidr'] = list()
+ for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+ edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+
+ edge_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'],
+ edge_conf['instance_name'])
+ edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+
+ if os.environ['conf_stepcerts_enabled'] == 'true':
+ edge_conf['step_cert_sans'] = ' --san {0} '.format(edge_conf['edge_private_ip'])
+ if edge_conf['network_type'] == 'public':
+ edge_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+ dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name']),
+ edge_conf['edge_public_ip'])
+ else:
+ edge_conf['step_cert_sans'] = ''
+ if os.environ['conf_os_family'] == 'debian':
+ edge_conf['initial_user'] = 'ubuntu'
+ edge_conf['sudo_group'] = 'sudo'
+ if os.environ['conf_os_family'] == 'redhat':
+ edge_conf['initial_user'] = 'ec2-user'
+ edge_conf['sudo_group'] = 'wheel'
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
+ sys.exit(1)
try:
- if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
- if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
-
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- edge_conf['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -117,37 +146,24 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
- remove_sgroups(edge_conf['dataengine_instances_name'])
- remove_sgroups(edge_conf['notebook_instance_name'])
- remove_sgroups(edge_conf['instance_name'])
- remove_s3('edge', os.environ['project_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING PREREQUISITES]')
logging.info('[INSTALLING PREREQUISITES]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
- format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['aws_region'])
+ format(edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+ os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
- remove_sgroups(edge_conf['dataengine_instances_name'])
- remove_sgroups(edge_conf['notebook_instance_name'])
- remove_sgroups(edge_conf['instance_name'])
- remove_s3('edge', os.environ['project_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -155,66 +171,56 @@
logging.info('[INSTALLING HTTP PROXY]')
additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
"template_file": "/root/templates/squid.conf",
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"ldap_host": os.environ['ldap_hostname'],
"ldap_dn": os.environ['ldap_dn'],
"ldap_user": os.environ['ldap_service_username'],
"ldap_password": os.environ['ldap_service_password'],
"vpc_cidrs": edge_conf['vpc_cidrs'],
"allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
- params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
- .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+ params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+ edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('configure_http_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing http proxy.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
- remove_sgroups(edge_conf['dataengine_instances_name'])
- remove_sgroups(edge_conf['notebook_instance_name'])
- remove_sgroups(edge_conf['instance_name'])
- remove_s3('edge', os.environ['project_name'])
+ dlab.fab.append_result("Failed installing http proxy.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": edge_conf['project_name'],
"user_keydir": os.environ['conf_key_dir'],
"user_key": edge_conf['user_key']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+ edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key." + str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
- remove_sgroups(edge_conf['dataengine_instances_name'])
- remove_sgroups(edge_conf['notebook_instance_name'])
- remove_sgroups(edge_conf['instance_name'])
- remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing users key.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING NGINX REVERSE PROXY]')
logging.info('[INSTALLING NGINX REVERSE PROXY]')
- keycloak_client_secret = str(uuid.uuid4())
+ edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
- "--step_cert_sans '{}' " \
- .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], edge_conf['service_base_name'] +
- '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret, step_cert_sans)
+ "--step_cert_sans '{}' ".format(
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+ '{}-{}-{}'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+ edge_conf['endpoint_name']),
+ edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
try:
local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
except:
@@ -222,36 +228,31 @@
raise Exception
keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
"--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
- "--edge_public_ip {} --hostname {} --project_name {} --endpoint_name {} " \
- .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
- os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
- os.environ['keycloak_user_password'],
- keycloak_client_secret, edge_conf['edge_public_ip'], instance_hostname, os.environ['project_name'], os.environ['endpoint_name'])
+ "--edge_public_ip {} --hostname {} --project_name {} --endpoint_name {} ".format(
+ edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+ os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+ os.environ['keycloak_user_password'], edge_conf['keycloak_client_secret'],
+                        edge_conf['edge_public_ip'], edge_conf['instance_hostname'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
try:
local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing nginx reverse proxy." + str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
- remove_sgroups(edge_conf['dataengine_instances_name'])
- remove_sgroups(edge_conf['notebook_instance_name'])
- remove_sgroups(edge_conf['instance_name'])
- remove_s3('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed installing nginx reverse proxy.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(edge_conf['instance_name']))
- print("Hostname: {}".format(instance_hostname))
+ print("Hostname: {}".format(edge_conf['instance_hostname']))
print("Public IP: {}".format(edge_conf['edge_public_ip']))
print("Private IP: {}".format(edge_conf['edge_private_ip']))
- print("Instance ID: {}".format(get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name'])))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'],
+ edge_conf['instance_name'])))
print("Key name: {}".format(edge_conf['key_name']))
print("Bucket name: {}".format(edge_conf['bucket_name']))
print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
@@ -260,10 +261,10 @@
print("Edge SG: {}".format(edge_conf['edge_security_group_name']))
print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
with open("/root/result.json", 'w') as result:
- res = {"hostname": instance_hostname,
+ res = {"hostname": edge_conf['instance_hostname'],
"public_ip": edge_conf['edge_public_ip'],
"ip": edge_conf['edge_private_ip'],
- "instance_id": get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
+ "instance_id": dlab.meta_lib.get_instance_by_name(edge_conf['tag_name'], edge_conf['instance_name']),
"key_name": edge_conf['key_name'],
"user_own_bicket_name": edge_conf['bucket_name'],
"shared_bucket_name": edge_conf['shared_bucket_name'],
@@ -274,13 +275,13 @@
"edge_sg": edge_conf['edge_security_group_name'],
"notebook_subnet": edge_conf['private_subnet_cidr'],
"full_edge_conf": edge_conf,
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"@class": "com.epam.dlab.dto.aws.edge.EdgeInfoAws",
"Action": "Create new EDGE server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ clear_resources()
+ sys.exit(1)
- sys.exit(0)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
index 2449cd7..a9f856a 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_start.py
@@ -21,9 +21,14 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
+import logging
+import os
+import json
+
if __name__ == "__main__":
@@ -35,29 +40,28 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
edge_conf = dict()
- edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
edge_conf['project_name'] = os.environ['project_name']
edge_conf['endpoint_name'] = os.environ['endpoint_name']
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
- edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+ edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
logging.info('[START EDGE]')
print('[START EDGE]')
try:
- start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+ dlab.actions_lib.start_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to start edge.", str(err))
+ dlab.fab.append_result("Failed to start edge.", str(err))
sys.exit(1)
try:
- instance_hostname = get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
- addresses = get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
+ instance_hostname = dlab.meta_lib.get_instance_hostname(edge_conf['tag_name'], edge_conf['instance_name'])
+ addresses = dlab.meta_lib.get_instance_ip_address(edge_conf['tag_name'], edge_conf['instance_name'])
ip_address = addresses.get('Private')
public_ip_address = addresses.get('Public')
print('[SUMMARY]')
@@ -74,7 +78,6 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
index 86ff6e3..d8bd92e 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_status.py
@@ -23,20 +23,26 @@
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
+from fabric.api import *
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/edge/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Getting statuses of DLAB resources')
try:
@@ -49,6 +55,5 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to collect necessary information.", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to collect necessary information.", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
index 3f99b36..3948781 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/edge_stop.py
@@ -21,9 +21,13 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
+import os
+import logging
+import json
if __name__ == "__main__":
@@ -35,23 +39,22 @@
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
edge_conf = dict()
- edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
edge_conf['project_name'] = os.environ['project_name']
edge_conf['endpoint_name'] = os.environ['endpoint_name']
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
- edge_conf['tag_name'] = edge_conf['service_base_name'] + '-Tag'
+ edge_conf['tag_name'] = edge_conf['service_base_name'] + '-tag'
logging.info('[STOP EDGE]')
print('[STOP EDGE]')
try:
- stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
+ dlab.actions_lib.stop_ec2(edge_conf['tag_name'], edge_conf['instance_name'])
except Exception as err:
- append_result("Failed to stop edge.", str(err))
+ dlab.fab.append_result("Failed to stop edge.", str(err))
sys.exit(1)
try:
@@ -60,7 +63,6 @@
"Action": "Stop edge server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
index 3ff45c8..cc53b22 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -42,67 +44,77 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -110,9 +122,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -120,36 +131,34 @@
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
- params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
- .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
- notebook_config['dlab_ssh_user'])
+ params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}".format(
+ instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
- params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".\
- format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
- edge_instance_private_ip)
+ params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}".format(
+ instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'],
+ edge_instance_private_ip)
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -164,7 +173,7 @@
"--os_user {5} " \
"--scala_version {6} " \
"--r_mirror {7} " \
- "--ip_adress {8} " \
+ "--ip_address {8} " \
"--exploratory_name {9} " \
"--edge_ip {10}".\
format(instance_hostname,
@@ -184,9 +193,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyter.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure jupyter.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -199,12 +207,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -216,12 +223,11 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -237,9 +243,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -264,92 +269,103 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
+ os.environ['project_name'], os.environ['endpoint_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], os.environ['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("Image name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("Jupyter URL: {}".format(jupyter_ip_url))
- print("Jupyter URL: {}".format(jupyter_dns_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
- print("ReverseProxyUngit".format(jupyter_ungit_access_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("Image name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("Jupyter URL: {}".format(jupyter_ip_url))
+ print("Jupyter URL: {}".format(jupyter_dns_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook: {}".format(jupyter_notebook_access_url))
+        print("ReverseProxyUngit: {}".format(jupyter_ungit_access_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
index b864fd4..d828df5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/jupyterlab_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -42,69 +44,77 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['edge_user_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'],
- notebook_config['instance_name']).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ (instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -123,16 +132,16 @@
print('[CONFIGURE PROXY ON JUPYTERLAB INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
- .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -147,9 +156,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -165,7 +173,7 @@
"--os_user {} " \
"--scala_version {} " \
"--r_mirror {} " \
- "--ip_adress {} " \
+ "--ip_address {} " \
"--exploratory_name {}".\
format(instance_hostname,
keyfile_name,
@@ -184,9 +192,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyterlab.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure jupyterlab.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -199,12 +206,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -216,12 +222,11 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -237,9 +242,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -264,29 +268,27 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -304,9 +306,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy for docker.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -321,60 +322,69 @@
try:
local("~/scripts/jupyterlab_container_start.py {}".format(params))
except:
- traceback.print_exc()
- raise Exception
+ traceback.print_exc()
+ raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start Jupyter container.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
- jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("Image name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("JupyterLab URL: {}".format(jupyter_ip_url))
- print("JupyterLab URL: {}".format(jupyter_dns_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
- print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_acces_url = "http://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ jupyter_ungit_acces_url = "http://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("Image name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("JupyterLab URL: {}".format(jupyter_ip_url))
+ print("JupyterLab URL: {}".format(jupyter_dns_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook: {}".format(jupyter_notebook_acces_url))
+        print("ReverseProxyUngit: {}".format(jupyter_ungit_acces_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "JupyterLab",
- "url": jupyter_notebook_acces_url},
- {"description": "Ungit",
- "url": jupyter_ungit_acces_url},
- #{"description": "JupyterLab (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "JupyterLab",
+ "url": jupyter_notebook_acces_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_acces_url},
+ #{"description": "JupyterLab (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
index 009b81e..9d44ba5 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_prepare.py
@@ -22,12 +22,16 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
import traceback
import boto3
+import logging
+from fabric.api import *
if __name__ == "__main__":
@@ -38,121 +42,137 @@
level=logging.DEBUG,
filename=local_log_filepath)
- create_aws_config_files()
- print('Generating infrastructure names and tags')
- project_conf = dict()
- project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- project_conf['endpoint_name'] = os.environ['endpoint_name']
- project_conf['endpoint_tag'] = os.environ['endpoint_name']
- project_conf['project_name'] = os.environ['project_name']
- project_conf['project_tag'] = os.environ['project_name']
- project_conf['key_name'] = os.environ['conf_key_name']
- project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
- project_conf['vpc_id'] = os.environ['aws_vpc_id']
- project_conf['region'] = os.environ['aws_region']
- project_conf['ami_id'] = get_ami_id(os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
- project_conf['instance_size'] = os.environ['aws_edge_instance_size']
- project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
- project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- project_conf['tag_name'] = '{}-Tag'.format(project_conf['service_base_name'])
- project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
- project_conf['project_name'],
- project_conf['endpoint_name'])
- project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
- project_conf['shared_bucket_name_tag'] = '{0}-{1}-shared-bucket'.format(
- project_conf['service_base_name'], project_conf['endpoint_tag'])
- project_conf['shared_bucket_name'] = project_conf['shared_bucket_name_tag'].lower().replace('_', '-')
- project_conf['edge_role_name'] = '{}-{}-edge-Role'.format(
- project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
- project_conf['edge_role_profile_name'] = '{}-{}-edge-Profile'.format(
- project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
- project_conf['edge_policy_name'] = '{}-{}-edge-Policy'.format(
- project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'])
- project_conf['edge_security_group_name'] = '{}-sg'.format(project_conf['edge_instance_name'])
- project_conf['notebook_instance_name'] = '{}-{}-nb'.format(project_conf['service_base_name'],
- os.environ['project_name'])
- project_conf['dataengine_instances_name'] = '{}-{}-dataengine' \
- .format(project_conf['service_base_name'], os.environ['project_name'])
- project_conf['notebook_dataengine_role_name'] = '{}-{}-{}-nb-de-Role' \
- .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
- project_conf['notebook_dataengine_policy_name'] = '{}-{}-{}-nb-de-Policy' \
- .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
- project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(project_conf['service_base_name'].lower().replace('-', '_'), os.environ['project_name'],os.environ['endpoint_name'])
- project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
- os.environ['project_name'],os.environ['endpoint_name'])
- project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
- project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
- os.environ['project_name'])
- project_conf['dataengine_master_security_group_name'] = '{}-{}-dataengine-master-sg' \
- .format(project_conf['service_base_name'], os.environ['project_name'])
- project_conf['dataengine_slave_security_group_name'] = '{}-{}-dataengine-slave-sg' \
- .format(project_conf['service_base_name'], os.environ['project_name'])
- project_conf['allowed_ip_cidr'] = list()
- for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
- project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ','')})
- project_conf['network_type'] = os.environ['conf_network_type']
- project_conf['all_ip_cidr'] = '0.0.0.0/0'
- project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
- project_conf['elastic_ip_name'] = '{0}-{1}-edge-EIP'.format(project_conf['service_base_name'],
- os.environ['project_name'])
- project_conf['provision_instance_ip'] = None
- project_conf['local_endpoint'] = False
try:
- project_conf['provision_instance_ip'] = get_instance_ip_address(
- project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
- os.environ['endpoint_name'])).get('Private') + "/32"
- except:
- project_conf['provision_instance_ip'] = get_instance_ip_address(project_conf['tag_name'], '{0}-ssn'.format(
- project_conf['service_base_name'])).get('Private') + "/32"
- project_conf['local_endpoint'] = True
- if 'aws_user_predefined_s3_policies' not in os.environ:
- os.environ['aws_user_predefined_s3_policies'] = 'None'
-
- try:
- if os.environ['conf_user_subnets_range'] == '':
- raise KeyError
- except KeyError:
- os.environ['conf_user_subnets_range'] = ''
-
- # FUSE in case of absence of user's key
- try:
- project_conf['user_key'] = os.environ['key']
+ dlab.actions_lib.create_aws_config_files()
+ print('Generating infrastructure names and tags')
+ project_conf = dict()
+ project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ project_conf['endpoint_name'] = os.environ['endpoint_name']
+ project_conf['endpoint_tag'] = project_conf['endpoint_name']
+ project_conf['project_name'] = os.environ['project_name']
+ project_conf['project_tag'] = project_conf['project_name']
+ project_conf['key_name'] = os.environ['conf_key_name']
+ project_conf['public_subnet_id'] = os.environ['aws_subnet_id']
+ project_conf['vpc_id'] = os.environ['aws_vpc_id']
+ project_conf['region'] = os.environ['aws_region']
+ project_conf['ami_id'] = dlab.meta_lib.get_ami_id(os.environ['aws_{}_image_name'.format(
+ os.environ['conf_os_family'])])
+ project_conf['instance_size'] = os.environ['aws_edge_instance_size']
+ project_conf['sg_ids'] = os.environ['aws_security_groups_ids']
+ project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['tag_name'] = '{}-tag'.format(project_conf['service_base_name'])
+ project_conf['bucket_name_tag'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['bucket_name'] = project_conf['bucket_name_tag'].lower().replace('_', '-')
+ project_conf['shared_bucket_name_tag'] = '{0}-{1}-shared-bucket'.format(
+ project_conf['service_base_name'], project_conf['endpoint_tag'])
+ project_conf['shared_bucket_name'] = project_conf['shared_bucket_name_tag'].lower().replace('_', '-')
+ project_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['edge_role_profile_name'] = '{}-{}-{}-edge-profile'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['edge_policy_name'] = '{}-{}-{}-edge-policy'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['edge_security_group_name'] = '{}-{}-{}-edge-sg'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['notebook_instance_name'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['dataengine_instances_name'] = '{}-{}-{}-de'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['notebook_dataengine_role_name'] = '{}-{}-{}-nb-de-role'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['notebook_dataengine_policy_name'] = '{}-{}-{}-nb-de-policy'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+ project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+ project_conf['service_base_name'], project_conf['project_name'], project_conf['endpoint_name'])
+ project_conf['allowed_ip_cidr'] = list()
+ for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+ project_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ', '')})
+ project_conf['network_type'] = os.environ['conf_network_type']
+ project_conf['all_ip_cidr'] = '0.0.0.0/0'
+ project_conf['zone'] = os.environ['aws_region'] + os.environ['aws_zone']
+ project_conf['elastic_ip_name'] = '{0}-{1}-{2}-edge-static-ip'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['provision_instance_ip'] = None
+ project_conf['local_endpoint'] = False
try:
- local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
- project_conf['project_name']))
+ project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+ project_conf['tag_name'], '{0}-{1}-endpoint'.format(project_conf['service_base_name'],
+ project_conf['endpoint_name'])).get('Private'))
except:
- print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
- except KeyError:
- print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
+ project_conf['provision_instance_ip'] = '{}/32'.format(dlab.meta_lib.get_instance_ip_address(
+ project_conf['tag_name'], '{0}-ssn'.format(project_conf['service_base_name'])).get('Private'))
+ project_conf['local_endpoint'] = True
+ if 'aws_user_predefined_s3_policies' not in os.environ:
+ os.environ['aws_user_predefined_s3_policies'] = 'None'
+
+ try:
+ if os.environ['conf_user_subnets_range'] == '':
+ raise KeyError
+ except KeyError:
+ os.environ['conf_user_subnets_range'] = ''
+
+ # FUSE in case of absence of user's key
+ try:
+ project_conf['user_key'] = os.environ['key']
+ try:
+ local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+ project_conf['project_name']))
+ except:
+            print("Admin's public key has not been installed")
+ except KeyError:
+        print("Admin's public key has not been uploaded")
+ sys.exit(1)
+
+ print("Will create exploratory environment with edge node as access point as following: {}".
+ format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+ logging.info(json.dumps(project_conf))
+
+ if 'conf_additional_tags' in os.environ:
+ project_conf['bucket_additional_tags'] = ';' + os.environ['conf_additional_tags']
+ os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
+ ';project_tag:{0};endpoint_tag:{1};'.format(
+ project_conf['project_tag'], project_conf['endpoint_tag'])
+ else:
+ project_conf['bucket_additional_tags'] = ''
+ os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
+ project_conf['endpoint_tag'])
+ print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
- print("Will create exploratory environment with edge node as access point as following: {}".
- format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
- logging.info(json.dumps(project_conf))
-
- if 'conf_additional_tags' in os.environ:
- project_conf['bucket_additional_tags'] = ';' + os.environ['conf_additional_tags']
- os.environ['conf_additional_tags'] = os.environ['conf_additional_tags'] + \
- ';project_tag:{0};endpoint_tag:{1};'.format(
- project_conf['project_tag'], project_conf['endpoint_tag'])
- else:
- project_conf['bucket_additional_tags'] = ''
- os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(project_conf['project_tag'],
- project_conf['endpoint_tag'])
- print('Additional tags will be added: {}'.format(os.environ['conf_additional_tags']))
-
if not project_conf['local_endpoint']:
# attach project_tag and endpoint_tag to endpoint
try:
- endpoint_id = get_instance_by_name(project_conf['tag_name'], '{0}-{1}-endpoint'.format(
- project_conf['service_base_name'], os.environ['endpoint_name']))
+ endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'], '{0}-{1}-endpoint'.format(
+ project_conf['service_base_name'], project_conf['endpoint_name']))
print("Endpoint id: " + endpoint_id)
ec2 = boto3.client('ec2')
- ec2.create_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag', 'Value': project_conf['project_tag']},
- {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
+ ec2.create_tags(Resources=[endpoint_id], Tags=[
+ {'Key': 'project_tag', 'Value': project_conf['project_tag']},
+ {'Key': 'endpoint_tag', 'Value': project_conf['endpoint_tag']}])
except Exception as err:
print("Failed to attach Project tag to Endpoint", str(err))
traceback.print_exc()
@@ -160,41 +180,43 @@
try:
project_conf['vpc2_id'] = os.environ['aws_vpc2_id']
- project_conf['tag_name'] = '{}-secondary-Tag'.format(project_conf['service_base_name'])
+ project_conf['tag_name'] = '{}-secondary-tag'.format(project_conf['service_base_name'])
except KeyError:
project_conf['vpc2_id'] = project_conf['vpc_id']
+
+
try:
logging.info('[CREATE SUBNET]')
print('[CREATE SUBNET]')
params = "--vpc_id '{}' --infra_tag_name {} --infra_tag_value {} --prefix {} " \
"--user_subnets_range '{}' --subnet_name {} --zone {}".format(
- project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
- project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
- project_conf['private_subnet_name'],
- project_conf['zone'])
+ project_conf['vpc2_id'], project_conf['tag_name'], project_conf['service_base_name'],
+ project_conf['private_subnet_prefix'], os.environ['conf_user_subnets_range'],
+ project_conf['private_subnet_name'],
+ project_conf['zone'])
try:
local("~/scripts/{}.py {}".format('common_create_subnet', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create subnet.", str(err))
+ dlab.fab.append_result("Failed to create subnet.", str(err))
sys.exit(1)
tag = {"Key": project_conf['tag_name'],
- "Value": "{0}-{1}-subnet".format(project_conf['service_base_name'], project_conf['project_name'])}
- project_conf['private_subnet_cidr'] = get_subnet_by_tag(tag)
- subnet_id = get_subnet_by_cidr(project_conf['private_subnet_cidr'], project_conf['vpc2_id'])
- print('subnet id: {}'.format(subnet_id))
-
+ "Value": "{0}-{1}-{2}-subnet".format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])}
+ project_conf['private_subnet_cidr'] = dlab.meta_lib.get_subnet_by_tag(tag)
+ subnet_id = dlab.meta_lib.get_subnet_by_cidr(project_conf['private_subnet_cidr'], project_conf['vpc2_id'])
+ print('Subnet id: {}'.format(subnet_id))
print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
try:
logging.info('[CREATE EDGE ROLES]')
print('[CREATE EDGE ROLES]')
- user_tag = "{0}:{0}-{1}-edge-Role".format(project_conf['service_base_name'], project_conf['project_name'])
+ user_tag = "{0}:{0}-{1}-{2}-edge-role".format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
"--infra_tag_value {} --user_tag_value {}" \
.format(project_conf['edge_role_name'], project_conf['edge_role_profile_name'],
@@ -206,14 +228,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to creating roles.", str(err))
+        dlab.fab.append_result("Failed to create roles.", str(err))
sys.exit(1)
try:
logging.info('[CREATE BACKEND (NOTEBOOK) ROLES]')
print('[CREATE BACKEND (NOTEBOOK) ROLES]')
- user_tag = "{0}:{0}-{1}-{2}-nb-de-Role".format(project_conf['service_base_name'], project_conf['project_name'],os.environ['endpoint_name'])
+ user_tag = "{0}:{0}-{1}-{2}-nb-de-role".format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
params = "--role_name {} --role_profile_name {} --policy_name {} --region {} --infra_tag_name {} " \
"--infra_tag_value {} --user_tag_value {}" \
.format(project_conf['notebook_dataengine_role_name'],
@@ -226,15 +248,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to creating roles.", str(err))
- remove_all_iam_resources('edge', os.environ['project_name'])
+        dlab.fab.append_result("Failed to create roles.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
sys.exit(1)
try:
logging.info('[CREATE SECURITY GROUP FOR EDGE NODE]')
print('[CREATE SECURITY GROUPS FOR EDGE]')
- edge_sg_ingress = format_sg([
+ edge_sg_ingress = dlab.meta_lib.format_sg([
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -271,7 +292,7 @@
"PrefixListIds": []
}
])
- edge_sg_egress = format_sg([
+ edge_sg_egress = dlab.meta_lib.format_sg([
{
"PrefixListIds": [],
"FromPort": 22,
@@ -390,26 +411,26 @@
local("~/scripts/{}.py {}".format('common_create_security_group', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed creating security group for edge node.", str(err))
+ dlab.fab.append_result("Failed creating security group for edge node.", str(err))
raise Exception
with hide('stderr', 'running', 'warnings'):
print('Waiting for changes to propagate')
time.sleep(10)
except:
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
sys.exit(1)
try:
logging.info('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
print('[CREATE SECURITY GROUP FOR PRIVATE SUBNET]')
- project_group_id = check_security_group(project_conf['edge_security_group_name'])
+ project_group_id = dlab.meta_lib.check_security_group(project_conf['edge_security_group_name'])
sg_list = project_conf['sg_ids'].replace(" ", "").split(',')
rules_list = []
for i in sg_list:
rules_list.append({"GroupId": i})
- private_sg_ingress = format_sg([
+ private_sg_ingress = dlab.meta_lib.format_sg([
{
"IpProtocol": "-1",
"IpRanges": [],
@@ -430,7 +451,7 @@
}
])
- private_sg_egress = format_sg([
+ private_sg_egress = dlab.meta_lib.format_sg([
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": project_conf['private_subnet_cidr']}],
@@ -475,12 +496,11 @@
print('Waiting for changes to propagate')
time.sleep(10)
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating security group for private subnet.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
+ dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
sys.exit(1)
logging.info('[CREATING SECURITY GROUPS FOR MASTER NODE]')
@@ -498,12 +518,11 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create sg.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
+ dlab.fab.append_result("Failed to create sg.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
sys.exit(1)
logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -521,47 +540,48 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create security group.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['dataengine_instances_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
+ dlab.fab.append_result("Failed to create security group.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
sys.exit(1)
try:
logging.info('[CREATE BUCKETS]')
print('[CREATE BUCKETS]')
- project_conf['shared_bucket_tags'] = 'endpoint_tag:{0};{1}:{2};{3}:{4}{5}'.format(project_conf['endpoint_tag'],
- os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
- project_conf['tag_name'], project_conf['shared_bucket_name'], project_conf['bucket_additional_tags']).replace(';', ',')
+ project_conf['shared_bucket_tags'] = 'endpoint_tag:{0};{1}:{2};{3}:{4}{5}'.format(
+ project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+ project_conf['tag_name'], project_conf['shared_bucket_name'],
+ project_conf['bucket_additional_tags']).replace(';', ',')
params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}". \
- format(project_conf['shared_bucket_name'], project_conf['shared_bucket_tags'], project_conf['region'], project_conf['shared_bucket_name_tag'])
+ format(project_conf['shared_bucket_name'], project_conf['shared_bucket_tags'], project_conf['region'],
+ project_conf['shared_bucket_name_tag'])
try:
local("~/scripts/{}.py {}".format('common_create_bucket', params))
except:
traceback.print_exc()
raise Exception
- project_conf['bucket_tags'] = 'endpoint_tag:{0};{1}:{2};project_tag:{3};{4}:{5}{6}'.format(project_conf['endpoint_tag'],
- os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
- project_conf['project_tag'],
- project_conf['tag_name'], project_conf['bucket_name'], project_conf['bucket_additional_tags']).replace(';', ',')
+ project_conf['bucket_tags'] = 'endpoint_tag:{0};{1}:{2};project_tag:{3};{4}:{5}{6}'.format(
+ project_conf['endpoint_tag'], os.environ['conf_billing_tag_key'], os.environ['conf_billing_tag_value'],
+ project_conf['project_tag'], project_conf['tag_name'], project_conf['bucket_name'],
+ project_conf['bucket_additional_tags']).replace(';', ',')
params = "--bucket_name {} --bucket_tags {} --region {} --bucket_name_tag {}" \
- .format(project_conf['bucket_name'], project_conf['bucket_tags'], project_conf['region'], project_conf['bucket_name_tag'])
+ .format(project_conf['bucket_name'], project_conf['bucket_tags'], project_conf['region'],
+ project_conf['bucket_name_tag'])
try:
local("~/scripts/{}.py {}".format('common_create_bucket', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create buckets.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['dataengine_instances_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
+ dlab.fab.append_result("Failed to create buckets.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
sys.exit(1)
try:
@@ -569,26 +589,24 @@
print('[CREATING BUCKET POLICY FOR USER INSTANCES]')
params = '--bucket_name {} --shared_bucket_name {} --username {} --edge_role_name {} ' \
'--notebook_role_name {} --service_base_name {} --region {} ' \
- '--user_predefined_s3_policies "{}"'.format(project_conf['bucket_name'],
- project_conf['shared_bucket_name'],
- os.environ['project_name'], project_conf['edge_role_name'],
- project_conf['notebook_dataengine_role_name'],
- project_conf['service_base_name'], project_conf['region'],
- os.environ['aws_user_predefined_s3_policies'])
+ '--user_predefined_s3_policies "{}" --endpoint_name {}'.format(
+ project_conf['bucket_name'], project_conf['shared_bucket_name'], project_conf['project_name'],
+ project_conf['edge_role_name'], project_conf['notebook_dataengine_role_name'],
+ project_conf['service_base_name'], project_conf['region'],
+ os.environ['aws_user_predefined_s3_policies'], project_conf['endpoint_name'])
try:
local("~/scripts/{}.py {}".format('common_create_policy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create bucket policy.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['dataengine_instances_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
- remove_s3('edge', os.environ['project_name'])
+ dlab.fab.append_result("Failed to create bucket policy.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+ dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
sys.exit(1)
try:
@@ -602,27 +620,27 @@
project_conf['edge_instance_name'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
- edge_instance = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+ edge_instance = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+ project_conf['edge_instance_name'])
except:
traceback.print_exc()
raise Exception
-
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create instance.", str(err))
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['dataengine_instances_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
- remove_s3('edge', os.environ['project_name'])
+ dlab.fab.append_result("Failed to create instance.", str(err))
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+ dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
sys.exit(1)
if project_conf['network_type'] == 'public':
try:
logging.info('[ASSOCIATING ELASTIC IP]')
print('[ASSOCIATING ELASTIC IP]')
- project_conf['edge_id'] = get_instance_by_name(project_conf['tag_name'], project_conf['edge_instance_name'])
+ project_conf['edge_id'] = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+ project_conf['edge_instance_name'])
try:
project_conf['elastic_ip'] = os.environ['edge_elastic_ip']
except:
@@ -636,19 +654,19 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to associate elastic ip.", str(err))
+ dlab.fab.append_result("Failed to associate elastic ip.", str(err))
try:
- project_conf['edge_public_ip'] = get_instance_ip_address(project_conf['tag_name'],
- project_conf['edge_instance_name']).get('Public')
- project_conf['allocation_id'] = get_allocation_id_by_elastic_ip(project_conf['edge_public_ip'])
+ project_conf['edge_public_ip'] = dlab.meta_lib.get_instance_ip_address(
+ project_conf['tag_name'], project_conf['edge_instance_name']).get('Public')
+ project_conf['allocation_id'] = dlab.meta_lib.get_allocation_id_by_elastic_ip(
+ project_conf['edge_public_ip'])
except:
print("No Elastic IPs to release!")
- remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
- remove_all_iam_resources('notebook', os.environ['project_name'])
- remove_all_iam_resources('edge', os.environ['project_name'])
- remove_sgroups(project_conf['dataengine_instances_name'])
- remove_sgroups(project_conf['notebook_instance_name'])
- remove_sgroups(project_conf['edge_instance_name'])
- remove_s3('edge', os.environ['project_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.actions_lib.remove_ec2(project_conf['tag_name'], project_conf['edge_instance_name'])
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_conf['project_name'])
+ dlab.actions_lib.remove_all_iam_resources('edge', project_conf['project_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['dataengine_instances_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['notebook_instance_name'])
+ dlab.actions_lib.remove_sgroups(project_conf['edge_instance_name'])
+ dlab.actions_lib.remove_s3('edge', project_conf['project_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
index c0ec4d5..3495b13 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/project_terminate.py
@@ -22,18 +22,22 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
import boto3
import requests
-def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg):
+def terminate_edge_node(tag_name, project_name, tag_value, nb_sg, edge_sg, de_sg, emr_sg, endpoint_name):
print('Terminating EMR cluster')
try:
- clusters_list = get_emr_list(tag_name)
+ clusters_list = dlab.meta_lib.get_emr_list(tag_name)
if clusters_list:
for cluster_id in clusters_list:
client = boto3.client('emr')
@@ -41,89 +45,112 @@
cluster = cluster.get("Cluster")
emr_name = cluster.get('Name')
if '{}'.format(tag_value[:-1]) in emr_name:
- terminate_emr(cluster_id)
+ dlab.actions_lib.terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
sys.exit(1)
print("Terminating EDGE and notebook instances")
try:
- remove_ec2(tag_name, tag_value)
- except:
+ dlab.actions_lib.remove_ec2(tag_name, tag_value)
+ except Exception as err:
+ dlab.fab.append_result("Failed to terminate instances.", str(err))
sys.exit(1)
print("Removing s3 bucket")
try:
- remove_s3('edge', project_name)
- except:
+ dlab.actions_lib.remove_s3('edge', project_name)
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove buckets.", str(err))
sys.exit(1)
print("Removing IAM roles and profiles")
try:
- remove_all_iam_resources('notebook', project_name)
- remove_all_iam_resources('edge', project_name)
- except:
+ dlab.actions_lib.remove_all_iam_resources('notebook', project_name, endpoint_name)
+ dlab.actions_lib.remove_all_iam_resources('edge', project_name, endpoint_name)
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove IAM roles and profiles.", str(err))
+ sys.exit(1)
+
+ print("Deregistering project specific notebook's AMI")
+ try:
+ dlab.actions_lib.deregister_image(project_name)
+ except Exception as err:
+ dlab.fab.append_result("Failed to deregister images.", str(err))
sys.exit(1)
print("Removing security groups")
try:
- remove_sgroups(emr_sg)
- remove_sgroups(de_sg)
- remove_sgroups(nb_sg)
- remove_sgroups(edge_sg)
- except:
+ dlab.actions_lib.remove_sgroups(emr_sg)
+ dlab.actions_lib.remove_sgroups(de_sg)
+ dlab.actions_lib.remove_sgroups(nb_sg)
+ dlab.actions_lib.remove_sgroups(edge_sg)
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove Security Groups.", str(err))
sys.exit(1)
print("Removing private subnet")
try:
- remove_subnets(tag_value)
- except:
+ dlab.actions_lib.remove_subnets(tag_value)
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove subnets.", str(err))
sys.exit(1)
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/project/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
project_conf = dict()
- project_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- project_conf['endpoint_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'], os.environ['endpoint_name'])
+ project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
project_conf['project_name'] = os.environ['project_name']
- project_conf['tag_name'] = project_conf['service_base_name'] + '-Tag'
- project_conf['tag_value'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-*'
- project_conf['edge_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-edge'
- project_conf['nb_sg'] = project_conf['service_base_name'] + "-" + os.environ['project_name'] + '-nb'
+ project_conf['endpoint_name'] = os.environ['endpoint_name']
+ project_conf['endpoint_instance_name'] = '{}-{}-endpoint'.format(project_conf['service_base_name'],
+ project_conf['endpoint_name'])
+ project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
+ project_conf['tag_value'] = '{}-{}-{}-*'.format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['edge_sg'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['nb_sg'] = '{}-{}-{}-nb'.format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
project_conf['edge_instance_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- project_conf['de_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + \
- '-dataengine*'
- project_conf['emr_sg'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-des-*'
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['de_sg'] = '{}-{}-{}-de*'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['emr_sg'] = '{}-{}-{}-des-*'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
try:
logging.info('[TERMINATE PROJECT]')
print('[TERMINATE PROJECT]')
try:
terminate_edge_node(project_conf['tag_name'], project_conf['project_name'], project_conf['tag_value'],
- project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'], project_conf['emr_sg'])
+ project_conf['nb_sg'], project_conf['edge_sg'], project_conf['de_sg'],
+ project_conf['emr_sg'], project_conf['endpoint_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate project.", str(err))
+ dlab.fab.append_result("Failed to terminate project.", str(err))
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
try:
- endpoint_id = get_instance_by_name(project_conf['tag_name'], project_conf['endpoint_name'])
+ endpoint_id = dlab.meta_lib.get_instance_by_name(project_conf['tag_name'],
+ project_conf['endpoint_instance_name'])
print("Endpoint id: " + endpoint_id)
ec2 = boto3.client('ec2')
ec2.delete_tags(Resources=[endpoint_id], Tags=[{'Key': 'project_tag'}, {'Key': 'endpoint_tag'}])
@@ -148,7 +175,8 @@
}
client_params = {
- "clientId": project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-' + os.environ['endpoint_name'],
+ "clientId": '{}-{}-{}'.format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
}
keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -163,8 +191,10 @@
os.environ['keycloak_realm_name'],
keycloak_id_client)
- keycloak_client = requests.delete(keycloak_client_delete_url, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"),
- "Content-Type": "application/json"})
+ keycloak_client = requests.delete(
+ keycloak_client_delete_url,
+ headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+ "Content-Type": "application/json"})
except Exception as err:
print("Failed to remove project client from Keycloak", str(err))
@@ -175,6 +205,6 @@
"Action": "Terminate edge node"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
index 574ad47..dd2a93c 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/rstudio_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -43,70 +45,78 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['rstudio_pass'] = id_generator()
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -114,9 +124,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -133,9 +142,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -151,9 +159,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring R_STUDIO and all dependencies
@@ -163,7 +170,7 @@
params = "--hostname {0} --keyfile {1} " \
"--region {2} --rstudio_pass {3} " \
"--rstudio_version {4} --os_user {5} " \
- "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+ "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
.format(instance_hostname, keyfile_name,
os.environ['aws_region'], notebook_config['rstudio_pass'],
os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -175,9 +182,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure rstudio.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure rstudio.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -193,9 +199,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -206,12 +211,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -227,9 +231,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -240,103 +243,110 @@
'tensor': False
}
params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
- .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+ .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+ notebook_config['exploratory_name'], json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(`notebook_config['expected_image_name']`)
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
- os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], os.environ['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- rstudio_ip_url = "http://" + ip_address + ":8787/"
- rstudio_dns_url = "http://" + dns_name + ":8787/"
- rstudio_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- rstudio_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("AMI name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("Rstudio URL: {}".format(rstudio_ip_url))
- print("Rstudio URL: {}".format(rstudio_dns_url))
- print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
- print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ rstudio_ip_url = "http://" + ip_address + ":8787/"
+ rstudio_dns_url = "http://" + dns_name + ":8787/"
+ rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("AMI name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("Rstudio URL: {}".format(rstudio_ip_url))
+ print("Rstudio URL: {}".format(rstudio_dns_url))
+ print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+ print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "RStudio",
- "url": rstudio_notebook_access_url},
- {"description": "Ungit",
- "url": rstudio_ungit_access_url}#,
- #{"description": "RStudio (via tunnel)",
- # "url": rstudio_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ],
- "exploratory_user": notebook_config['dlab_ssh_user'],
- "exploratory_pass": notebook_config['rstudio_pass']}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "RStudio",
+ "url": rstudio_notebook_access_url},
+ {"description": "Ungit",
+ "url": rstudio_ungit_access_url}#,
+ #{"description": "RStudio (via tunnel)",
+ # "url": rstudio_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ],
+ "exploratory_user": notebook_config['dlab_ssh_user'],
+ "exploratory_pass": notebook_config['rstudio_pass']}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
index 80d4904..64fd85b 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_configure.py
@@ -21,12 +21,16 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os, json
+import logging
+import sys
+import os
from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import traceback
+import json
if __name__ == "__main__":
local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -34,84 +38,108 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
- instance = 'ssn'
+
+ ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
+
+ def clear_resources():
+ if ssn_conf['domain_created']:
+ dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+ os.environ['ssn_hosted_zone_name'],
+ os.environ['ssn_subdomain'])
+ dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ dlab.actions_lib.remove_s3(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
+ try:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ except:
+ print("There are no VPC Endpoints")
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
try:
logging.info('[DERIVING NAMES]')
print('[DERIVING NAMES]')
- service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
- role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
- policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
- ssn_bucket_name_tag = service_base_name + '-ssn-bucket'
- default_endpoint_name = os.environ['default_endpoint_name']
- shared_bucket_name_tag = '{0}-{1}-shared-bucket'.format(service_base_name, default_endpoint_name)
- ssn_bucket_name = ssn_bucket_name_tag.lower().replace('_', '-')
- shared_bucket_name = shared_bucket_name_tag.lower().replace('_', '-')
- tag_name = service_base_name + '-Tag'
- tag2_name = service_base_name + '-secondary-Tag'
- instance_name = service_base_name + '-ssn'
- region = os.environ['aws_region']
- ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
- ssn_ami_id = get_ami_id(ssn_image_name)
- policy_path = '/root/files/ssn_policy.json'
- vpc_cidr = os.environ['conf_vpc_cidr']
- vpc2_cidr = os.environ['conf_vpc2_cidr']
- sg_name = instance_name + '-sg'
- pre_defined_vpc = False
- pre_defined_subnet = False
- pre_defined_sg = False
- billing_enabled = True
- dlab_ssh_user = os.environ['conf_os_user']
- network_type = os.environ['conf_network_type']
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and \
'ssn_subdomain' in os.environ:
- domain_created = True
+ ssn_conf['domain_created'] = True
else:
- domain_created = False
+ ssn_conf['domain_created'] = False
+ ssn_conf['pre_defined_vpc'] = False
+ ssn_conf['pre_defined_subnet'] = False
+ ssn_conf['pre_defined_sg'] = False
+ ssn_conf['billing_enabled'] = True
+ ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+ ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+ ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+ ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+ ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+ ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+ ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+ ssn_conf['region'] = os.environ['aws_region']
+ ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+ ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+ ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+ ssn_conf['network_type'] = os.environ['conf_network_type']
+ ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
try:
if os.environ['aws_vpc_id'] == '':
raise KeyError
except KeyError:
- tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
- os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
- pre_defined_vpc = True
+ ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+ os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'], ssn_conf['service_base_name'])
+ ssn_conf['pre_defined_vpc'] = True
try:
if os.environ['aws_subnet_id'] == '':
raise KeyError
except KeyError:
- tag = {"Key": tag_name, "Value": "{}-subnet".format(service_base_name)}
- os.environ['aws_subnet_id'] = get_subnet_by_tag(tag, True)
- pre_defined_subnet = True
+ ssn_conf['tag'] = {"Key": ssn_conf['tag_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+ os.environ['aws_subnet_id'] = dlab.meta_lib.get_subnet_by_tag(ssn_conf['tag'], True)
+ ssn_conf['pre_defined_subnet'] = True
try:
if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
raise KeyError
except KeyError:
- tag = {"Key": tag2_name, "Value": "{}-subnet".format(service_base_name)}
- os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
- pre_defined_vpc2 = True
+ ssn_conf['tag'] = {"Key": ssn_conf['tag2_name'], "Value": "{}-subnet".format(ssn_conf['service_base_name'])}
+ os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+ ssn_conf['service_base_name'])
+ ssn_conf['pre_defined_vpc2'] = True
try:
if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_peering_id']:
raise KeyError
except KeyError:
- os.environ['aws_peering_id'] = get_peering_by_tag(tag_name, service_base_name)
- pre_defined_peering = True
+ os.environ['aws_peering_id'] = dlab.meta_lib.get_peering_by_tag(ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ ssn_conf['pre_defined_peering'] = True
try:
if os.environ['aws_security_groups_ids'] == '':
raise KeyError
except KeyError:
- os.environ['aws_security_groups_ids'] = get_security_group_by_name(sg_name)
- pre_defined_sg = True
+ os.environ['aws_security_groups_ids'] = dlab.meta_lib.get_security_group_by_name(ssn_conf['sg_name'])
+ ssn_conf['pre_defined_sg'] = True
try:
if os.environ['aws_account_id'] == '':
raise KeyError
if os.environ['aws_billing_bucket'] == '':
raise KeyError
except KeyError:
- billing_enabled = False
- if not billing_enabled:
+ ssn_conf['billing_enabled'] = False
+ if not ssn_conf['billing_enabled']:
os.environ['aws_account_id'] = 'None'
os.environ['aws_billing_bucket'] = 'None'
try:
@@ -120,36 +148,41 @@
except KeyError:
os.environ['aws_report_path'] = ''
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ ssn_conf['initial_user'] = 'ubuntu'
+ ssn_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ ssn_conf['initial_user'] = 'ec2-user'
+ ssn_conf['sudo_group'] = 'wheel'
- if network_type == 'private':
- instance_hostname = get_instance_ip_address(tag_name, instance_name).get('Private')
+ if ssn_conf['network_type'] == 'private':
+ ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_ip_address(
+ ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private')
else:
- instance_hostname = get_instance_hostname(tag_name, instance_name)
+ ssn_conf['instance_hostname'] = dlab.meta_lib.get_instance_hostname(
+ ssn_conf['tag_name'], ssn_conf['instance_name'])
if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} '.format(get_instance_ip_address(tag_name, instance_name).get('Private'))
- if network_type == 'public':
- step_cert_sans += ' --san {0} --san {1}'.format(
- get_instance_hostname(tag_name, instance_name),
- get_instance_ip_address(tag_name, instance_name).get('Public'))
+ ssn_conf['step_cert_sans'] = ' --san {0} '.format(dlab.meta_lib.get_instance_ip_address(
+ ssn_conf['tag_name'], ssn_conf['instance_name']).get('Private'))
+ if ssn_conf['network_type'] == 'public':
+ ssn_conf['step_cert_sans'] += ' --san {0} --san {1}'.format(
+ dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'], ssn_conf['instance_name']),
+ dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+ ssn_conf['instance_name']).get('Public'))
else:
- step_cert_sans = ''
+ ssn_conf['step_cert_sans'] = ''
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- dlab_ssh_user, sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -157,30 +190,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed creating ssh user 'dlab'.", str(err))
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -188,8 +199,8 @@
print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
params = "--hostname {} --keyfile {} --pip_packages 'boto3 backoff argparse fabric==1.14.0 awscli pymongo " \
"pyyaml jinja2' --user {} --region {}". \
- format(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", dlab_ssh_user,
- os.environ['aws_region'])
+ format(ssn_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ ssn_conf['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
@@ -197,44 +208,24 @@
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed installing software: pip, packages.", str(err))
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+ clear_resources()
sys.exit(1)
try:
logging.info('[CONFIGURE SSN INSTANCE]')
print('[CONFIGURE SSN INSTANCE]')
- additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name": service_base_name,
+ additional_config = {"nginx_template_dir": "/root/templates/", "service_base_name":
+ ssn_conf['service_base_name'],
"security_group_id": os.environ['aws_security_groups_ids'],
"vpc_id": os.environ['aws_vpc_id'], "subnet_id": os.environ['aws_subnet_id'],
"admin_key": os.environ['conf_key_name']}
params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
"--tag_resource_id {} --step_cert_sans '{}' ".format(
- instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
- json.dumps(additional_config), dlab_ssh_user, os.environ['ssn_dlab_path'],
- os.environ['conf_tag_resource_id'], step_cert_sans)
+ ssn_conf['instance_hostname'],
+ "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ json.dumps(additional_config), ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+ os.environ['conf_tag_resource_id'], ssn_conf['step_cert_sans'])
try:
local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -242,30 +233,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed configuring ssn.", str(err))
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.fab.append_result("Failed configuring ssn.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -285,75 +254,29 @@
{"name": "dataengine-service", "tag": "latest"},
{"name": "dataengine", "tag": "latest"}]
params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
- "--cloud_provider {} --region {} --gcr_creds {} --odahu_image {}".format(instance_hostname,
+ "--cloud_provider {} --region {} --gcr_creds {} --odahu_image {}".format(ssn_conf['instance_hostname'],
"{}{}.pem".format(os.environ['conf_key_dir'],
os.environ['conf_key_name']),
json.dumps(additional_config), os.environ['conf_os_family'],
- dlab_ssh_user, os.environ['ssn_dlab_path'],
+ ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
os.environ['conf_cloud_provider'], os.environ['aws_region'],
os.environ['ssn_gcr_creds'], os.environ['odahu_deploy_image'])
-
try:
local("~/scripts/{}.py {}".format('configure_docker', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Unable to configure docker.", str(err))
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.fab.append_result("Unable to configure docker.", str(err))
+ clear_resources()
sys.exit(1)
try:
- # mongo_parameters = {
- # "aws_region": os.environ['aws_region'],
- # "aws_vpc_id": os.environ['aws_vpc_id'],
- # "aws_subnet_id": os.environ['aws_subnet_id'],
- # "conf_service_base_name": service_base_name,
- # "aws_security_groups_ids": os.environ['aws_security_groups_ids'].replace(" ", ""),
- # "conf_os_family": os.environ['conf_os_family'],
- # "conf_tag_resource_id": os.environ['conf_tag_resource_id'],
- # "conf_key_dir": os.environ['conf_key_dir'],
- # "ssn_instance_size": os.environ['aws_ssn_instance_size'],
- # "edge_instance_size": os.environ['aws_edge_instance_size']
- # }
- # if os.environ['conf_duo_vpc_enable'] == 'true':
- # secondary_parameters = {
- # "aws_notebook_vpc_id": os.environ['aws_vpc2_id'],
- # "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
- # "aws_peering_id": os.environ['aws_peering_id']
- # }
- # else:
- # secondary_parameters = {
- # "aws_notebook_vpc_id": os.environ['aws_vpc_id'],
- # "aws_notebook_subnet_id": os.environ['aws_subnet_id'],
- # }
- # mongo_parameters.update(secondary_parameters)
cloud_params = [
{
'key': 'KEYCLOAK_REDIRECT_URI',
- 'value': "https://{0}/".format(get_instance_hostname(tag_name, instance_name))
+ 'value': "https://{0}/".format(dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+ ssn_conf['instance_name']))
},
{
'key': 'KEYCLOAK_REALM_NAME',
@@ -468,10 +391,6 @@
'value': ''
},
{
- 'key': 'SHARED_IMAGE_ENABLED',
- 'value': os.environ['conf_shared_image_enabled']
- },
- {
'key': 'CONF_IMAGE_ENABLED',
'value': os.environ['conf_image_enabled']
},
@@ -592,14 +511,14 @@
"--resource_id {} " \
"--default_endpoint_name {} " \
"--tags {}". \
- format(instance_hostname,
+ format(ssn_conf['instance_hostname'],
"{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
os.environ['ssn_dlab_path'],
- dlab_ssh_user,
+ ssn_conf['dlab_ssh_user'],
os.environ['conf_os_family'],
os.environ['request_id'],
os.environ['conf_resource'],
- service_base_name,
+ ssn_conf['service_base_name'],
os.environ['conf_tag_resource_id'],
os.environ['conf_billing_tag'],
os.environ['conf_cloud_provider'],
@@ -607,7 +526,7 @@
os.environ['aws_billing_bucket'],
os.environ['aws_job_enabled'],
os.environ['aws_report_path'],
- billing_enabled,
+ ssn_conf['billing_enabled'],
json.dumps(cloud_params),
os.environ['dlab_id'],
os.environ['usage_date'],
@@ -624,57 +543,36 @@
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Unable to configure UI.", str(err))
- print(err)
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.fab.append_result("Unable to configure UI.", str(err))
+ clear_resources()
sys.exit(1)
try:
logging.info('[SUMMARY]')
print('[SUMMARY]')
- print("Service base name: {}".format(service_base_name))
- print("SSN Name: {}".format(instance_name))
- print("SSN Hostname: {}".format(instance_hostname))
- print("Role name: {}".format(role_name))
- print("Role profile name: {}".format(role_profile_name))
- print("Policy name: {}".format(policy_name))
+ print("Service base name: {}".format(ssn_conf['service_base_name']))
+ print("SSN Name: {}".format(ssn_conf['instance_name']))
+ print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
+ print("Role name: {}".format(ssn_conf['role_name']))
+ print("Role profile name: {}".format(ssn_conf['role_profile_name']))
+ print("Policy name: {}".format(ssn_conf['policy_name']))
print("Key name: {}".format(os.environ['conf_key_name']))
print("VPC ID: {}".format(os.environ['aws_vpc_id']))
print("Subnet ID: {}".format(os.environ['aws_subnet_id']))
print("Security IDs: {}".format(os.environ['aws_security_groups_ids']))
print("SSN instance shape: {}".format(os.environ['aws_ssn_instance_size']))
- print("SSN AMI name: {}".format(ssn_image_name))
- print("SSN bucket name: {}".format(ssn_bucket_name))
- print("Shared bucket name: {}".format(shared_bucket_name))
- print("Region: {}".format(region))
- jenkins_url = "http://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
- jenkins_url_https = "https://{}/jenkins".format(get_instance_hostname(tag_name, instance_name))
- print("Jenkins URL: {}".format(jenkins_url))
- print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
- print("DLab UI HTTP URL: http://{}".format(get_instance_hostname(tag_name, instance_name)))
- print("DLab UI HTTPS URL: https://{}".format(get_instance_hostname(tag_name, instance_name)))
+ print("SSN AMI name: {}".format(ssn_conf['ssn_image_name']))
+ print("Region: {}".format(ssn_conf['region']))
+ ssn_conf['jenkins_url'] = "http://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+ ssn_conf['tag_name'], ssn_conf['instance_name']))
+ ssn_conf['jenkins_url_https'] = "https://{}/jenkins".format(dlab.meta_lib.get_instance_hostname(
+ ssn_conf['tag_name'], ssn_conf['instance_name']))
+ print("Jenkins URL: {}".format(ssn_conf['jenkins_url']))
+ print("Jenkins URL HTTPS: {}".format(ssn_conf['jenkins_url_https']))
+ print("DLab UI HTTP URL: http://{}".format(dlab.meta_lib.get_instance_hostname(
+ ssn_conf['tag_name'], ssn_conf['instance_name'])))
+ print("DLab UI HTTPS URL: https://{}".format(dlab.meta_lib.get_instance_hostname(
+ ssn_conf['tag_name'], ssn_conf['instance_name'])))
try:
with open('jenkins_creds.txt') as f:
print(f.read())
@@ -682,26 +580,26 @@
print("Jenkins is either configured already or have issues in configuration routine.")
with open("/root/result.json", 'w') as f:
- res = {"service_base_name": service_base_name,
- "instance_name": instance_name,
- "instance_hostname": get_instance_hostname(tag_name, instance_name),
- "role_name": role_name,
- "role_profile_name": role_profile_name,
- "policy_name": policy_name,
+ res = {"service_base_name": ssn_conf['service_base_name'],
+ "instance_name": ssn_conf['instance_name'],
+ "instance_hostname": dlab.meta_lib.get_instance_hostname(ssn_conf['tag_name'],
+ ssn_conf['instance_name']),
+ "role_name": ssn_conf['role_name'],
+ "role_profile_name": ssn_conf['role_profile_name'],
+ "policy_name": ssn_conf['policy_name'],
"master_keyname": os.environ['conf_key_name'],
"vpc_id": os.environ['aws_vpc_id'],
"subnet_id": os.environ['aws_subnet_id'],
"security_id": os.environ['aws_security_groups_ids'],
"instance_shape": os.environ['aws_ssn_instance_size'],
- "bucket_name": ssn_bucket_name,
- "shared_bucket_name": shared_bucket_name,
- "region": region,
+ "region": ssn_conf['region'],
"action": "Create SSN instance"}
f.write(json.dumps(res))
print('Upload response file')
params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
- format(instance_name, local_log_filepath, dlab_ssh_user, instance_hostname)
+ format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+ ssn_conf['instance_hostname'])
local("~/scripts/{}.py {}".format('upload_response_file', params))
logging.info('[FINALIZE]')
@@ -710,28 +608,7 @@
if os.environ['conf_lifecycle_stage'] == 'prod':
params += "--key_id {}".format(os.environ['aws_access_key'])
local("~/scripts/{}.py {}".format('ssn_finalize', params))
- except:
- if domain_created:
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ clear_resources()
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
index 8615a25..45c65f2 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_prepare.py
@@ -21,333 +21,349 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import logging
+import sys
+import os
from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+import json
if __name__ == "__main__":
local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
- local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+ local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
- instance = 'ssn'
- pre_defined_vpc = False
- pre_defined_subnet = False
- pre_defined_sg = False
- pre_defined_vpc2 = False
+ ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
+ ssn_conf['pre_defined_vpc'] = False
+ ssn_conf['pre_defined_subnet'] = False
+ ssn_conf['pre_defined_sg'] = False
+ ssn_conf['pre_defined_vpc2'] = False
try:
logging.info('[CREATE AWS CONFIG FILE]')
print('[CREATE AWS CONFIG FILE]')
if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
- create_aws_config_files(generate_full_config=True)
+ dlab.actions_lib.create_aws_config_files(generate_full_config=True)
else:
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
except Exception as err:
- print('Error: {0}'.format(err))
logging.info('Unable to create configuration')
- append_result("Unable to create configuration")
+ dlab.fab.append_result("Unable to create configuration", err)
traceback.print_exc()
sys.exit(1)
try:
logging.info('[DERIVING NAMES]')
print('[DERIVING NAMES]')
- service_base_name = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- role_name = service_base_name.lower().replace('-', '_') + '-ssn-Role'
- role_profile_name = service_base_name.lower().replace('-', '_') + '-ssn-Profile'
- policy_name = service_base_name.lower().replace('-', '_') + '-ssn-Policy'
- default_endpoint_name = os.environ['default_endpoint_name']
- tag_name = service_base_name + '-Tag'
- tag2_name = service_base_name + '-secondary-Tag'
- user_tag = "{0}:{0}-ssn-Role".format(service_base_name)
- instance_name = service_base_name + '-ssn'
- region = os.environ['aws_region']
- zone_full = os.environ['aws_region'] + os.environ['aws_zone']
- ssn_image_name = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
- ssn_ami_id = get_ami_id(ssn_image_name)
- policy_path = '/root/files/ssn_policy.json'
- vpc_cidr = os.environ['conf_vpc_cidr']
- vpc2_cidr = os.environ['conf_vpc2_cidr']
- vpc_name = '{}-VPC'.format(service_base_name)
- vpc2_name = '{}-secondary-VPC'.format(service_base_name)
- subnet_name = '{}-subnet'.format(service_base_name)
- allowed_ip_cidr = list()
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ ssn_conf['role_name'] = '{}-ssn-role'.format(ssn_conf['service_base_name'])
+ ssn_conf['role_profile_name'] = '{}-ssn-profile'.format(ssn_conf['service_base_name'])
+ ssn_conf['policy_name'] = '{}-ssn-policy'.format(ssn_conf['service_base_name'])
+ ssn_conf['tag_name'] = '{}-tag'.format(ssn_conf['service_base_name'])
+ ssn_conf['tag2_name'] = '{}-secondary-tag'.format(ssn_conf['service_base_name'])
+ ssn_conf['user_tag'] = "{0}:{0}-ssn-role".format(ssn_conf['service_base_name'])
+ ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+ ssn_conf['region'] = os.environ['aws_region']
+ ssn_conf['zone_full'] = os.environ['aws_region'] + os.environ['aws_zone']
+ ssn_conf['ssn_image_name'] = os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])]
+ ssn_conf['ssn_ami_id'] = dlab.meta_lib.get_ami_id(ssn_conf['ssn_image_name'])
+ ssn_conf['policy_path'] = '/root/files/ssn_policy.json'
+ ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+ ssn_conf['vpc2_cidr'] = os.environ['conf_vpc2_cidr']
+ ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+ ssn_conf['vpc2_name'] = '{}-vpc2'.format(ssn_conf['service_base_name'])
+ ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+ ssn_conf['allowed_ip_cidr'] = list()
for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
- allowed_ip_cidr.append({"CidrIp": cidr.replace(' ','')})
- sg_name = instance_name + '-sg'
- network_type = os.environ['conf_network_type']
- all_ip_cidr = '0.0.0.0/0'
- elastic_ip_name = '{0}-ssn-EIP'.format(service_base_name)
+ ssn_conf['allowed_ip_cidr'].append({"CidrIp": cidr.replace(' ','')})
+ ssn_conf['sg_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+ ssn_conf['network_type'] = os.environ['conf_network_type']
+ ssn_conf['all_ip_cidr'] = '0.0.0.0/0'
+ ssn_conf['elastic_ip_name'] = '{0}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
- if get_instance_by_name(tag_name, instance_name):
- print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+ if dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name']):
+ print("Service base name should be unique and less or equal 20 symbols. Please try again.")
+ sys.exit(1)
+
+ try:
+ if not os.environ['aws_vpc_id']:
+ raise KeyError
+ except KeyError:
+ try:
+ ssn_conf['pre_defined_vpc'] = True
+ logging.info('[CREATE VPC AND ROUTE TABLE]')
+ print('[CREATE VPC AND ROUTE TABLE]')
+ params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
+ ssn_conf['vpc_cidr'], ssn_conf['region'], ssn_conf['tag_name'], ssn_conf['service_base_name'],
+ ssn_conf['vpc_name'])
+ try:
+ local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+ except:
+ traceback.print_exc()
+ raise Exception
+ os.environ['aws_vpc_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to create VPC", str(err))
sys.exit(1)
+ ssn_conf['allowed_vpc_cidr_ip_ranges'] = list()
+ for cidr in dlab.meta_lib.get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
+ ssn_conf['allowed_vpc_cidr_ip_ranges'].append({"CidrIp": cidr})
+
+ try:
+ if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
+ raise KeyError
+ except KeyError:
try:
- if not os.environ['aws_vpc_id']:
- raise KeyError
- except KeyError:
+ ssn_conf['pre_defined_vpc2'] = True
+ logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+ print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
+ params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
+ "--vpc_name {}".format(ssn_conf['vpc2_cidr'], ssn_conf['region'], ssn_conf['tag2_name'],
+ ssn_conf['service_base_name'], ssn_conf['vpc2_name'])
try:
- pre_defined_vpc = True
- logging.info('[CREATE VPC AND ROUTE TABLE]')
- print('[CREATE VPC AND ROUTE TABLE]')
- params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --vpc_name {}".format(
- vpc_cidr, region, tag_name, service_base_name, vpc_name)
+ local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+ except:
+ traceback.print_exc()
+ raise Exception
+ os.environ['aws_vpc2_id'] = dlab.meta_lib.get_vpc_by_tag(ssn_conf['tag2_name'],
+ ssn_conf['service_base_name'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to create secondary VPC.", str(err))
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ sys.exit(1)
+
+ try:
+ if os.environ['aws_subnet_id'] == '':
+ raise KeyError
+ except KeyError:
+ try:
+ ssn_conf['pre_defined_subnet'] = True
+ logging.info('[CREATE SUBNET]')
+ print('[CREATE SUBNET]')
+ params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
+ "--ssn {5} --zone {6} --subnet_name {7}".format(
+                 os.environ['aws_vpc_id'], 'ssn', ssn_conf['tag_name'], ssn_conf['service_base_name'], '20',
+ True, ssn_conf['zone_full'], ssn_conf['subnet_name'])
+ try:
+ local("~/scripts/{}.py {}".format('common_create_subnet', params))
+ except:
+ traceback.print_exc()
+ raise Exception
+ with open('/tmp/ssn_subnet_id', 'r') as f:
+ os.environ['aws_subnet_id'] = f.read()
+ dlab.actions_lib.enable_auto_assign_ip(os.environ['aws_subnet_id'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to create Subnet.", str(err))
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
try:
- local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
except:
- traceback.print_exc()
- raise Exception
- os.environ['aws_vpc_id'] = get_vpc_by_tag(tag_name, service_base_name)
- except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create VPC. Exception:" + str(err))
- sys.exit(1)
-
- allowed_vpc_cidr_ip_ranges = list()
- for cidr in get_vpc_cidr_by_id(os.environ['aws_vpc_id']):
- allowed_vpc_cidr_ip_ranges.append({"CidrIp": cidr})
-
- try:
- if os.environ['conf_duo_vpc_enable'] == 'true' and not os.environ['aws_vpc2_id']:
- raise KeyError
- except KeyError:
- try:
- pre_defined_vpc2 = True
- logging.info('[CREATE SECONDARY VPC AND ROUTE TABLE]')
- print('[CREATE SECONDARY VPC AND ROUTE TABLE]')
- params = "--vpc {} --region {} --infra_tag_name {} --infra_tag_value {} --secondary " \
- "--vpc_name {}".format(vpc2_cidr, region, tag2_name, service_base_name, vpc2_name)
+ print("Subnet hasn't been created.")
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
try:
- local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
- traceback.print_exc()
- raise Exception
- os.environ['aws_vpc2_id'] = get_vpc_by_tag(tag2_name, service_base_name)
- except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create secondary VPC. Exception:" + str(err))
- if pre_defined_vpc:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- sys.exit(1)
+ print("There are no VPC Endpoints")
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+ sys.exit(1)
+ try:
+ if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
+ raise KeyError
+ except KeyError:
try:
- if os.environ['aws_subnet_id'] == '':
- raise KeyError
- except KeyError:
- try:
- pre_defined_subnet = True
- logging.info('[CREATE SUBNET]')
- print('[CREATE SUBNET]')
- params = "--vpc_id {0} --username {1} --infra_tag_name {2} --infra_tag_value {3} --prefix {4} " \
- "--ssn {5} --zone {6} --subnet_name {7}".format(os.environ['aws_vpc_id'], 'ssn', tag_name,
- service_base_name, '20', True, zone_full, subnet_name)
+ logging.info('[CREATE PEERING CONNECTION]')
+ print('[CREATE PEERING CONNECTION]')
+ os.environ['aws_peering_id'] = dlab.actions_lib.create_peering_connection(
+ os.environ['aws_vpc_id'], os.environ['aws_vpc2_id'], ssn_conf['service_base_name'])
+ print('PEERING CONNECTION ID:' + os.environ['aws_peering_id'])
+ dlab.actions_lib.create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'],
+ os.environ['aws_peering_id'],
+ dlab.meta_lib.get_cidr_by_vpc(os.environ['aws_vpc2_id']))
+ except Exception as err:
+ dlab.fab.append_result("Failed to create peering connection.", str(err))
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
try:
- local("~/scripts/{}.py {}".format('common_create_subnet', params))
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
except:
- traceback.print_exc()
- raise Exception
- with open('/tmp/ssn_subnet_id', 'r') as f:
- os.environ['aws_subnet_id'] = f.read()
- enable_auto_assign_ip(os.environ['aws_subnet_id'])
- except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create Subnet.", str(err))
- if pre_defined_vpc:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_route_tables(tag_name, True)
- try:
- remove_subnets(service_base_name + "-subnet")
- except:
- print("Subnet hasn't been created.")
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
- sys.exit(1)
-
- try:
- if os.environ['conf_duo_vpc_enable'] == 'true' and os.environ['aws_vpc_id'] and os.environ['aws_vpc2_id']:
- raise KeyError
- except KeyError:
- try:
- logging.info('[CREATE PEERING CONNECTION]')
- print('[CREATE PEERING CONNECTION]')
- os.environ['aws_peering_id'] = create_peering_connection(os.environ['aws_vpc_id'],
- os.environ['aws_vpc2_id'], service_base_name)
- print('PEERING CONNECTION ID:' + os.environ['aws_peering_id'])
- create_route_by_id(os.environ['aws_subnet_id'], os.environ['aws_vpc_id'], os.environ['aws_peering_id'],
- get_cidr_by_vpc(os.environ['aws_vpc2_id']))
- except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create peering connection.", str(err))
- if pre_defined_vpc:
- remove_route_tables(tag_name, True)
- try:
- remove_subnets(service_base_name + "-subnet")
- except:
- print("Subnet hasn't been created.")
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
- sys.exit(1)
-
- try:
- if os.environ['aws_security_groups_ids'] == '':
- raise KeyError
- except KeyError:
- try:
- pre_defined_sg = True
- logging.info('[CREATE SG FOR SSN]')
- print('[CREATE SG FOR SSN]')
- ingress_sg_rules_template = format_sg([
- {
- "PrefixListIds": [],
- "FromPort": 80,
- "IpRanges": allowed_ip_cidr,
- "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
- },
- {
- "PrefixListIds": [],
- "FromPort": 22,
- "IpRanges": allowed_ip_cidr,
- "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
- },
- {
- "PrefixListIds": [],
- "FromPort": 443,
- "IpRanges": allowed_ip_cidr,
- "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
- },
- {
- "PrefixListIds": [],
- "FromPort": -1,
- "IpRanges": allowed_ip_cidr,
- "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
- },
- {
- "PrefixListIds": [],
- "FromPort": 80,
- "IpRanges": allowed_vpc_cidr_ip_ranges,
- "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
- },
- {
- "PrefixListIds": [],
- "FromPort": 443,
- "IpRanges": allowed_vpc_cidr_ip_ranges,
- "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
- }
- ])
- egress_sg_rules_template = format_sg([
- {"IpProtocol": "-1", "IpRanges": [{"CidrIp": all_ip_cidr}], "UserIdGroupPairs": [], "PrefixListIds": []}
- ])
- params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
- "--infra_tag_value {} --force {} --ssn {}". \
- format(sg_name, os.environ['aws_vpc_id'], json.dumps(ingress_sg_rules_template),
- json.dumps(egress_sg_rules_template), service_base_name, tag_name, False, True)
+ print("Subnet hasn't been created.")
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- local("~/scripts/{}.py {}".format('common_create_security_group', params))
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
- traceback.print_exc()
- raise Exception
- with open('/tmp/ssn_sg_id', 'r') as f:
- os.environ['aws_security_groups_ids'] = f.read()
- except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating security group for SSN.", str(err))
- if pre_defined_vpc:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
- try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
- except:
- print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
- sys.exit(1)
+ print("There are no VPC Endpoints")
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+ sys.exit(1)
+
+ try:
+ if os.environ['aws_security_groups_ids'] == '':
+ raise KeyError
+ except KeyError:
+ try:
+ ssn_conf['pre_defined_sg'] = True
+ logging.info('[CREATE SG FOR SSN]')
+ print('[CREATE SG FOR SSN]')
+ ssn_conf['ingress_sg_rules_template'] = dlab.meta_lib.format_sg([
+ {
+ "PrefixListIds": [],
+ "FromPort": 80,
+ "IpRanges": ssn_conf['allowed_ip_cidr'],
+ "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+ },
+ {
+ "PrefixListIds": [],
+ "FromPort": 22,
+ "IpRanges": ssn_conf['allowed_ip_cidr'],
+ "ToPort": 22, "IpProtocol": "tcp", "UserIdGroupPairs": []
+ },
+ {
+ "PrefixListIds": [],
+ "FromPort": 443,
+ "IpRanges": ssn_conf['allowed_ip_cidr'],
+ "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+ },
+ {
+ "PrefixListIds": [],
+ "FromPort": -1,
+ "IpRanges": ssn_conf['allowed_ip_cidr'],
+ "ToPort": -1, "IpProtocol": "icmp", "UserIdGroupPairs": []
+ },
+ {
+ "PrefixListIds": [],
+ "FromPort": 80,
+ "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+ "ToPort": 80, "IpProtocol": "tcp", "UserIdGroupPairs": []
+ },
+ {
+ "PrefixListIds": [],
+ "FromPort": 443,
+ "IpRanges": ssn_conf['allowed_vpc_cidr_ip_ranges'],
+ "ToPort": 443, "IpProtocol": "tcp", "UserIdGroupPairs": []
+ }
+ ])
+ egress_sg_rules_template = dlab.meta_lib.format_sg([
+ {"IpProtocol": "-1", "IpRanges": [{"CidrIp": ssn_conf['all_ip_cidr']}], "UserIdGroupPairs": [],
+ "PrefixListIds": []}
+ ])
+ params = "--name {} --vpc_id {} --security_group_rules '{}' --egress '{}' --infra_tag_name {} " \
+ "--infra_tag_value {} --force {} --ssn {}". \
+ format(ssn_conf['sg_name'], os.environ['aws_vpc_id'],
+ json.dumps(ssn_conf['ingress_sg_rules_template']), json.dumps(egress_sg_rules_template),
+ ssn_conf['service_base_name'], ssn_conf['tag_name'], False, True)
+ try:
+ local("~/scripts/{}.py {}".format('common_create_security_group', params))
+ except:
+ traceback.print_exc()
+ raise Exception
+ with open('/tmp/ssn_sg_id', 'r') as f:
+ os.environ['aws_security_groups_ids'] = f.read()
+ except Exception as err:
+        dlab.fab.append_result("Failed creating security group for SSN.", str(err))
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
+ try:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ except:
+ print("There are no VPC Endpoints")
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
+ sys.exit(1)
+
+ try:
logging.info('[CREATE ROLES]')
print('[CREATE ROLES]')
params = "--role_name {} --role_profile_name {} --policy_name {} --policy_file_name {} --region {} " \
"--infra_tag_name {} --infra_tag_value {} --user_tag_value {}".\
- format(role_name, role_profile_name, policy_name, policy_path, os.environ['aws_region'], tag_name,
- service_base_name, user_tag)
+ format(ssn_conf['role_name'], ssn_conf['role_profile_name'], ssn_conf['policy_name'],
+ ssn_conf['policy_path'], os.environ['aws_region'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'], ssn_conf['user_tag'])
try:
local("~/scripts/{}.py {}".format('common_create_role_policy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create roles.", str(err))
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.fab.append_result("Unable to create roles.", str(err))
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
try:
logging.info('[CREATE ENDPOINT AND ROUTE-TABLE]')
print('[CREATE ENDPOINT AND ROUTE-TABLE]')
params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
- os.environ['aws_vpc_id'], os.environ['aws_region'], tag_name, service_base_name)
+ os.environ['aws_vpc_id'], os.environ['aws_region'], ssn_conf['tag_name'], ssn_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create an endpoint.", str(err))
- remove_all_iam_resources(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.fab.append_result("Unable to create an endpoint.", str(err))
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
if os.environ['conf_duo_vpc_enable'] == 'true':
@@ -355,42 +371,44 @@
logging.info('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
print('[CREATE ENDPOINT AND ROUTE-TABLE FOR NOTEBOOK VPC]')
params = "--vpc_id {} --region {} --infra_tag_name {} --infra_tag_value {}".format(
- os.environ['aws_vpc2_id'], os.environ['aws_region'], tag2_name, service_base_name)
+ os.environ['aws_vpc2_id'], os.environ['aws_region'], ssn_conf['tag2_name'],
+ ssn_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('ssn_create_endpoint', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create secondary endpoint.", str(err))
- remove_all_iam_resources(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.fab.append_result("Unable to create secondary endpoint.", str(err))
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
try:
logging.info('[CREATE SSN INSTANCE]')
print('[CREATE SSN INSTANCE]')
- params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} --subnet_id {5} " \
- "--iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} --primary_disk_size {10}".\
- format(instance_name, ssn_ami_id, os.environ['aws_ssn_instance_size'], os.environ['conf_key_name'],
- os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
- role_profile_name, tag_name, instance_name, 'ssn', '20')
+ params = "--node_name {0} --ami_id {1} --instance_type {2} --key_name {3} --security_group_ids {4} " \
+ "--subnet_id {5} --iam_profile {6} --infra_tag_name {7} --infra_tag_value {8} --instance_class {9} " \
+ "--primary_disk_size {10}".\
+ format(ssn_conf['instance_name'], ssn_conf['ssn_ami_id'], os.environ['aws_ssn_instance_size'],
+ os.environ['conf_key_name'], os.environ['aws_security_groups_ids'], os.environ['aws_subnet_id'],
+ ssn_conf['role_profile_name'], ssn_conf['tag_name'], ssn_conf['instance_name'], 'ssn', '20')
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -398,107 +416,112 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create ssn instance.", str(err))
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.fab.append_result("Unable to create ssn instance.", str(err))
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ dlab.actions_lib.remove_s3(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
- if network_type == 'public':
+ if ssn_conf['network_type'] == 'public':
try:
logging.info('[ASSOCIATING ELASTIC IP]')
print('[ASSOCIATING ELASTIC IP]')
- ssn_id = get_instance_by_name(tag_name, instance_name)
+ ssn_conf['ssn_id'] = dlab.meta_lib.get_instance_by_name(ssn_conf['tag_name'], ssn_conf['instance_name'])
try:
- elastic_ip = os.environ['ssn_elastic_ip']
+ ssn_conf['elastic_ip'] = os.environ['ssn_elastic_ip']
except:
- elastic_ip = 'None'
+ ssn_conf['elastic_ip'] = 'None'
params = "--elastic_ip {} --ssn_id {} --infra_tag_name {} --infra_tag_value {}".format(
- elastic_ip, ssn_id, tag_name, elastic_ip_name)
+ ssn_conf['elastic_ip'], ssn_conf['ssn_id'], ssn_conf['tag_name'], ssn_conf['elastic_ip_name'])
try:
local("~/scripts/{}.py {}".format('ssn_associate_elastic_ip', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to associate elastic ip.", str(err))
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.fab.append_result("Failed to associate elastic ip.", str(err))
+ dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ dlab.actions_lib.remove_s3(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
- if network_type == 'private':
- instance_ip = get_instance_ip_address(tag_name, instance_name).get('Private')
+ if ssn_conf['network_type'] == 'private':
+ ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+ ssn_conf['instance_name']).get('Private')
else:
- instance_ip = get_instance_ip_address(tag_name, instance_name).get('Public')
+ ssn_conf['instance_ip'] = dlab.meta_lib.get_instance_ip_address(ssn_conf['tag_name'],
+ ssn_conf['instance_name']).get('Public')
if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
try:
logging.info('[CREATING ROUTE53 RECORD]')
print('[CREATING ROUTE53 RECORD]')
try:
- create_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'], instance_ip)
+ dlab.actions_lib.create_route_53_record(os.environ['ssn_hosted_zone_id'],
+ os.environ['ssn_hosted_zone_name'],
+ os.environ['ssn_subdomain'], ssn_conf['instance_ip'])
except:
traceback.print_exc()
raise Exception
except Exception as err:
- append_result("Failed to create route53 record.", str(err))
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+ dlab.fab.append_result("Failed to create route53 record.", str(err))
+ dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'],
+ os.environ['ssn_hosted_zone_name'],
os.environ['ssn_subdomain'])
- remove_ec2(tag_name, instance_name)
- remove_all_iam_resources(instance)
- remove_s3(instance)
- if pre_defined_sg:
- remove_sgroups(tag_name)
- if pre_defined_subnet:
- remove_internet_gateways(os.environ['aws_vpc_id'], tag_name, service_base_name)
- remove_subnets(service_base_name + "-subnet")
- if pre_defined_vpc:
- remove_vpc_endpoints(os.environ['aws_vpc_id'])
- remove_route_tables(tag_name, True)
- remove_vpc(os.environ['aws_vpc_id'])
- if pre_defined_vpc2:
- remove_peering('*')
+ dlab.actions_lib.remove_ec2(ssn_conf['tag_name'], ssn_conf['instance_name'])
+ dlab.actions_lib.remove_all_iam_resources(ssn_conf['instance'])
+ dlab.actions_lib.remove_s3(ssn_conf['instance'])
+ if ssn_conf['pre_defined_sg']:
+ dlab.actions_lib.remove_sgroups(ssn_conf['tag_name'])
+ if ssn_conf['pre_defined_subnet']:
+ dlab.actions_lib.remove_internet_gateways(os.environ['aws_vpc_id'], ssn_conf['tag_name'],
+ ssn_conf['service_base_name'])
+ dlab.actions_lib.remove_subnets(ssn_conf['subnet_name'])
+ if ssn_conf['pre_defined_vpc']:
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc_id'])
+ if ssn_conf['pre_defined_vpc2']:
+ dlab.actions_lib.remove_peering('*')
try:
- remove_vpc_endpoints(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_vpc_endpoints(os.environ['aws_vpc2_id'])
except:
print("There are no VPC Endpoints")
- remove_route_tables(tag2_name, True)
- remove_vpc(os.environ['aws_vpc2_id'])
+ dlab.actions_lib.remove_route_tables(ssn_conf['tag2_name'], True)
+ dlab.actions_lib.remove_vpc(os.environ['aws_vpc2_id'])
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
index 76a119d..975e8d3 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate.py
@@ -21,11 +21,16 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import sys
+import os
+import logging
+import traceback
from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import json
if __name__ == "__main__":
local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -35,17 +40,17 @@
filename=local_log_filepath)
# generating variables dictionary
if 'aws_access_key' in os.environ and 'aws_secret_access_key' in os.environ:
- create_aws_config_files(generate_full_config=True)
+ dlab.actions_lib.create_aws_config_files(generate_full_config=True)
else:
- create_aws_config_files()
+ dlab.actions_lib.create_aws_config_files()
print('Generating infrastructure names and tags')
ssn_conf = dict()
- ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-Tag'
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ ssn_conf['tag_name'] = ssn_conf['service_base_name'] + '-tag'
ssn_conf['edge_sg'] = ssn_conf['service_base_name'] + "*" + '-edge'
ssn_conf['nb_sg'] = ssn_conf['service_base_name'] + "*" + '-nb'
- ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-dataengine*'
+ ssn_conf['de_sg'] = ssn_conf['service_base_name'] + "*" + '-de*'
ssn_conf['de-service_sg'] = ssn_conf['service_base_name'] + "*" + '-des-*'
try:
@@ -61,7 +66,7 @@
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to terminate ssn.", str(err))
+ dlab.fab.append_result("Failed to terminate ssn.", str(err))
sys.exit(1)
try:
@@ -70,6 +75,6 @@
"Action": "Terminate ssn with all service_base_name environment"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
index 7aa6629..27b5913 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/ssn_terminate_aws_resources.py
@@ -21,12 +21,13 @@
#
# ******************************************************************************
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
import boto3
import argparse
import sys
-from dlab.ssn_lib import *
import os
parser = argparse.ArgumentParser()
@@ -37,7 +38,7 @@
parser.add_argument('--service_base_name', type=str)
parser.add_argument('--de_se_sg', type=str)
args = parser.parse_args()
-tag2 = args.service_base_name + '-secondary-Tag'
+tag2 = args.service_base_name + '-secondary-tag'
##############
# Run script #
@@ -46,120 +47,129 @@
if __name__ == "__main__":
print('Terminating EMR cluster')
try:
- clusters_list = get_emr_list(args.tag_name)
+ clusters_list = dlab.meta_lib.get_emr_list(args.tag_name)
if clusters_list:
for cluster_id in clusters_list:
client = boto3.client('emr')
cluster = client.describe_cluster(ClusterId=cluster_id)
cluster = cluster.get("Cluster")
emr_name = cluster.get('Name')
- terminate_emr(cluster_id)
+ dlab.actions_lib.terminate_emr(cluster_id)
print("The EMR cluster {} has been terminated successfully".format(emr_name))
else:
print("There are no EMR clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate EMR cluster.", str(err))
sys.exit(1)
print("Deregistering notebook's AMI")
try:
- deregister_image()
- except:
+ dlab.actions_lib.deregister_image()
+ except Exception as err:
+ dlab.fab.append_result("Failed to deregister images.", str(err))
sys.exit(1)
print("Terminating EC2 instances")
try:
- remove_ec2(args.tag_name, '*')
- except:
+ dlab.actions_lib.remove_ec2(args.tag_name, '*')
+ except Exception as err:
+ dlab.fab.append_result("Failed to terminate instances.", str(err))
sys.exit(1)
if 'ssn_hosted_zone_id' in os.environ and 'ssn_hosted_zone_name' in os.environ and 'ssn_subdomain' in os.environ:
print("Removing Route53 records")
- remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
- os.environ['ssn_subdomain'])
+ dlab.actions_lib.remove_route_53_record(os.environ['ssn_hosted_zone_id'], os.environ['ssn_hosted_zone_name'],
+ os.environ['ssn_subdomain'])
print("Removing security groups")
try:
- remove_sgroups(args.de_se_sg)
- remove_sgroups(args.de_sg)
- remove_sgroups(args.nb_sg)
- remove_sgroups(args.edge_sg)
+ dlab.actions_lib.remove_sgroups(args.de_se_sg)
+ dlab.actions_lib.remove_sgroups(args.de_sg)
+ dlab.actions_lib.remove_sgroups(args.nb_sg)
+ dlab.actions_lib.remove_sgroups(args.edge_sg)
try:
- remove_sgroups(args.tag_name)
+ dlab.actions_lib.remove_sgroups(args.tag_name)
except:
print("There is no pre-defined SSN SG")
- except:
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove security groups.", str(err))
sys.exit(1)
print("Removing private subnet")
try:
- remove_subnets('*')
- except:
+ dlab.actions_lib.remove_subnets('*')
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove subnets.", str(err))
sys.exit(1)
print("Removing peering connection")
try:
- remove_peering('*')
- except:
+ dlab.actions_lib.remove_peering('*')
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove peering connections.", str(err))
sys.exit(1)
print("Removing s3 buckets")
try:
- remove_s3()
- except:
+ dlab.actions_lib.remove_s3()
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove buckets.", str(err))
sys.exit(1)
print("Removing IAM roles, profiles and policies")
try:
- remove_all_iam_resources('all')
- except:
+ dlab.actions_lib.remove_all_iam_resources('all')
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove IAM roles, profiles and policies.", str(err))
sys.exit(1)
print("Removing route tables")
try:
- remove_route_tables(args.tag_name)
- remove_route_tables(tag2)
- except:
+ dlab.actions_lib.remove_route_tables(args.tag_name)
+ dlab.actions_lib.remove_route_tables(tag2)
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove route tables.", str(err))
sys.exit(1)
print("Removing SSN subnet")
try:
- remove_subnets(args.service_base_name + '-subnet')
- except:
- print("There is no pre-defined SSN Subnet")
+ dlab.actions_lib.remove_subnets(args.service_base_name + '-subnet')
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove SSN subnet.", str(err))
+ sys.exit(1)
print("Removing SSN VPC")
try:
- vpc_id = get_vpc_by_tag(args.tag_name, args.service_base_name)
+ vpc_id = dlab.meta_lib.get_vpc_by_tag(args.tag_name, args.service_base_name)
if vpc_id != '':
try:
- remove_vpc_endpoints(vpc_id)
+ dlab.actions_lib.remove_vpc_endpoints(vpc_id)
except:
print("There is no such VPC Endpoint")
try:
- remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
+ dlab.actions_lib.remove_internet_gateways(vpc_id, args.tag_name, args.service_base_name)
except:
print("There is no such Internet gateway")
- remove_route_tables(args.tag_name, True)
- remove_vpc(vpc_id)
+ dlab.actions_lib.remove_route_tables(args.tag_name, True)
+ dlab.actions_lib.remove_vpc(vpc_id)
else:
print("There is no pre-defined SSN VPC")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove SSN VPC.", str(err))
sys.exit(1)
print("Removing notebook VPC")
try:
- vpc_id = get_vpc_by_tag(tag2, args.service_base_name)
+ vpc_id = dlab.meta_lib.get_vpc_by_tag(tag2, args.service_base_name)
if vpc_id != '':
try:
- remove_vpc_endpoints(vpc_id)
+ dlab.actions_lib.remove_vpc_endpoints(vpc_id)
except:
print("There is no such VPC Endpoint")
- remove_route_tables(tag2, True)
- remove_vpc(vpc_id)
+ dlab.actions_lib.remove_route_tables(tag2, True)
+ dlab.actions_lib.remove_vpc(vpc_id)
else:
print("There is no pre-defined notebook VPC")
except Exception as err:
- print('Error: {0}'.format(err))
- sys.exit(1)
\ No newline at end of file
+        dlab.fab.append_result("Failed to remove secondary VPC.", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
index a10ccae..6baaf45 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor-rstudio_configure.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
import traceback
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -44,74 +46,78 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'],
- notebook_config['instance_name']).get('Private')
- tag = {"Key": notebook_config['tag_name'],
- "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
- notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
- notebook_config['rstudio_pass'] = id_generator()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -119,9 +125,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -138,9 +143,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -156,9 +160,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -168,7 +171,7 @@
params = "--hostname {0} --keyfile {1} " \
"--region {2} --rstudio_pass {3} " \
"--rstudio_version {4} --os_user {5} " \
- "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+ "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
.format(instance_hostname, keyfile_name,
os.environ['aws_region'], notebook_config['rstudio_pass'],
os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -180,9 +183,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure tensoflow-rstudio.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+        dlab.fab.append_result("Failed to configure tensorflow-rstudio.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -198,9 +200,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -211,12 +212,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -232,9 +232,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -245,112 +244,121 @@
'tensor': True
}
params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
- .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio', notebook_config['exploratory_name'], json.dumps(additional_info))
+ .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'rstudio',
+ notebook_config['exploratory_name'], json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
+            print("Looks like it's first time we configure notebook server. Creating shared image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
- os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], os.environ['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- tensorboard_url = "http://" + ip_address + ":6006/"
- rstudio_ip_url = "http://" + ip_address + ":8787/"
- rstudio_dns_url = "http://" + dns_name + ":8787/"
- rstudio_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
- rstudio_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("AMI name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("TensorBoard URL: {}".format(tensorboard_url))
- print("TensorBoard log dir: /var/log/tensorboard")
- print("Rstudio URL: {}".format(rstudio_ip_url))
- print("Rstudio URL: {}".format(rstudio_dns_url))
- print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
- print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
- notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
- notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ tensorboard_url = "http://" + ip_address + ":6006/"
+ rstudio_ip_url = "http://" + ip_address + ":8787/"
+ rstudio_dns_url = "http://" + dns_name + ":8787/"
+ rstudio_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ rstudio_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("AMI name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("TensorBoard URL: {}".format(tensorboard_url))
+ print("TensorBoard log dir: /var/log/tensorboard")
+ print("Rstudio URL: {}".format(rstudio_ip_url))
+ print("Rstudio URL: {}".format(rstudio_dns_url))
+ print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+ print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "tensorboard_log_dir": "/var/log/tensorboard",
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "RStudio",
- "url": rstudio_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": rstudio_ungit_access_url}#,
- #{"description": "RStudio (via tunnel)",
- # "url": rstudio_ip_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensorboard_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ],
- "exploratory_user": notebook_config['dlab_ssh_user'],
- "exploratory_pass": notebook_config['rstudio_pass']}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "tensorboard_log_dir": "/var/log/tensorboard",
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "RStudio",
+ "url": rstudio_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": rstudio_ungit_access_url}#,
+ #{"description": "RStudio (via tunnel)",
+ # "url": rstudio_ip_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensorboard_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ],
+ "exploratory_user": notebook_config['dlab_ssh_user'],
+ "exploratory_pass": notebook_config['rstudio_pass']}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
index ebcb814..3cf3a46 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
import traceback
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -44,72 +46,77 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- tag = {"Key": notebook_config['tag_name'],
- "Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['project_name'])}
- notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -117,9 +124,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -136,9 +142,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -154,9 +159,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring TensorFlow and all dependencies
@@ -165,7 +169,7 @@
print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
params = "--hostname {0} --keyfile {1} " \
"--region {2} --os_user {3} " \
- "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+ "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
.format(instance_hostname, keyfile_name,
os.environ['aws_region'], notebook_config['dlab_ssh_user'],
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_ip)
@@ -175,9 +179,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure TensorFlow.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -193,9 +196,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -206,12 +208,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -227,9 +228,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -240,106 +240,114 @@
'tensor': True
}
params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
- .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',notebook_config['exploratory_name'], json.dumps(additional_info))
+ .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',
+ notebook_config['exploratory_name'], json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
+ print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
- os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], os.environ['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ tensorboard_url = "http://" + ip_address + ":6006/"
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://{}/{}-tensor/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("AMI name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("TensorBoard URL: {}".format(tensorboard_url))
+ print("TensorBoard log dir: /var/log/tensorboard")
+ print("Jupyter URL: {}".format(jupyter_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- tensorboard_url = "http://" + ip_address + ":6006/"
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("AMI name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("TensorBoard URL: {}".format(tensorboard_url))
- print("TensorBoard log dir: /var/log/tensorboard")
- print("Jupyter URL: {}".format(jupyter_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
- notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(
- notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "tensorboard_log_dir": "/var/log/tensorboard",
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensorboard_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "tensorboard_log_dir": "/var/log/tensorboard",
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensorboard_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
index 8e8e94b..dbdae70 100644
--- a/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/aws/zeppelin_configure.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
import traceback
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -44,77 +46,83 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name']
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower()[:12], '-', True)
- notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['network_type'] = os.environ['conf_network_type']
- notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'], args.uuid)
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if os.environ['conf_shared_image_enabled'] == 'false':
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['application'])
- notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-Profile' \
- .format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['ip_address'] = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
-
- region = os.environ['aws_region']
- if region == 'us-east-1':
- endpoint_url = 'https://s3.amazonaws.com'
- elif region == 'cn-north-1':
- endpoint_url = "https://s3.{}.amazonaws.com.cn".format(region)
- else:
- endpoint_url = 'https://s3-{}.amazonaws.com'.format(region)
-
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
- os.environ['project_name'], os.environ['endpoint_name'])
- edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
- edge_instance_private_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- if notebook_config['network_type'] == 'private':
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
- else:
- edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['network_type'] = os.environ['conf_network_type']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'], args.uuid)
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if os.environ['conf_shared_image_enabled'] == 'false':
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ else:
+ notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
+ notebook_config['role_profile_name'] = '{}-{}-{}-nb-de-profile'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['tag_name'] = '{}-tag'.format(notebook_config['service_base_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['ip_address'] = dlab.meta_lib.get_instance_ip_address(
+ notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
+ notebook_config['region'] = os.environ['aws_region']
+ if notebook_config['region'] == 'us-east-1':
+ notebook_config['endpoint_url'] = 'https://s3.amazonaws.com'
+ elif notebook_config['region'] == 'cn-north-1':
+ notebook_config['endpoint_url'] = "https://s3.{}.amazonaws.com.cn".format(notebook_config['region'])
+ else:
+ notebook_config['endpoint_url'] = 'https://s3-{}.amazonaws.com'.format(notebook_config['region'])
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name'])
+ edge_instance_name = '{}-{}-{}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'], notebook_config['endpoint_name'])
+ edge_instance_hostname = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
+ edge_instance_private_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ edge_instance_name).get('Private')
+ notebook_config['edge_instance_hostname'] = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ edge_instance_name)
+ keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_ip = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Private')
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -122,9 +130,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -141,9 +148,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -160,8 +166,8 @@
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring zeppelin and all dependencies
@@ -169,7 +175,8 @@
logging.info('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
print('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
additional_config = {"frontend_hostname": edge_instance_hostname,
- "backend_hostname": get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name']),
+ "backend_hostname": dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'],
+ notebook_config['instance_name']),
"backend_port": "8080",
"nginx_template_dir": "/root/templates/"}
params = "--hostname {0} --instance_name {1} " \
@@ -180,13 +187,13 @@
"--zeppelin_version {10} --scala_version {11} " \
"--livy_version {12} --multiple_clusters {13} " \
"--r_mirror {14} --endpoint_url {15} " \
- "--ip_adress {16} --exploratory_name {17} --edge_ip {18}" \
+ "--ip_address {16} --exploratory_name {17} --edge_ip {18}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['aws_region'],
json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], edge_instance_hostname, '3128',
os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
- os.environ['notebook_r_mirror'], endpoint_url, notebook_config['ip_address'],
+ os.environ['notebook_r_mirror'], notebook_config['endpoint_url'], notebook_config['ip_address'],
notebook_config['exploratory_name'], edge_ip)
try:
local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
@@ -194,9 +201,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure zeppelin.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -212,9 +218,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -225,12 +230,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -246,9 +250,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -259,99 +262,106 @@
'tensor': False
}
params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
- .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin', notebook_config['exploratory_name'], json.dumps(additional_info))
+ .format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'zeppelin',
+ notebook_config['exploratory_name'], json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING AMI]')
- ami_id = get_ami_id_by_name(`notebook_config['expected_image_name']`)
+ ami_id = dlab.meta_lib.get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '' and notebook_config['shared_image_enabled'] == 'false':
print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';project_tag:{0};endpoint_tag:{1};'.format(
- os.environ['project_name'], os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{2};project_tag:{0};endpoint_tag:{1};'.format(
+ os.environ['project_name'], os.environ['endpoint_name'], os.environ['conf_additional_tags'])
except KeyError:
os.environ['conf_additional_tags'] = 'project_tag:{0};endpoint_tag:{1}'.format(
os.environ['project_name'], os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
else:
+ print("Looks like it's first time we configure notebook server. Creating image.")
try:
- os.environ['conf_additional_tags'] = os.environ[
- 'conf_additional_tags'] + ';ami:shared;endpoint_tag:{};'.format(
- os.environ['endpoint_name'])
+ os.environ['conf_additional_tags'] = '{};ami:shared;endpoint_tag:{};'.format(
+ os.environ['conf_additional_tags'], os.environ['endpoint_name'])
except KeyError:
os.environ['conf_additional_tags'] = 'ami:shared;endpoint_tag:{}'.format(
os.environ['endpoint_name'])
- image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
- instance_name=notebook_config['instance_name'],
- image_name=notebook_config['expected_image_name'])
+ image_id = dlab.actions_lib.create_image_from_instance(
+ tag_name=notebook_config['tag_name'], instance_name=notebook_config['instance_name'],
+ image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = dlab.meta_lib.get_instance_ip_address(notebook_config['tag_name'],
+ notebook_config['instance_name']).get('Private')
+ dns_name = dlab.meta_lib.get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
+ zeppelin_ip_url = "http://" + ip_address + ":8080/"
+ zeppelin_dns_url = "http://" + dns_name + ":8080/"
+ zeppelin_notebook_access_url = "https://{}/{}/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ zeppelin_ungit_access_url = "https://{}/{}-ungit/".format(notebook_config['edge_instance_hostname'],
+ notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private DNS: {}".format(dns_name))
+ print("Private IP: {}".format(ip_address))
+ print("Instance ID: {}".format(dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name'])))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['user_keyname']))
+ print("AMI name: {}".format(notebook_config['notebook_image_name']))
+ print("Profile name: {}".format(notebook_config['role_profile_name']))
+ print("SG name: {}".format(notebook_config['security_group_name']))
+ print("Zeppelin URL: {}".format(zeppelin_ip_url))
+ print("Zeppelin URL: {}".format(zeppelin_dns_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
+ print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
+ format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
- # generating output information
- ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
- dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
- zeppelin_ip_url = "http://" + ip_address + ":8080/"
- zeppelin_dns_url = "http://" + dns_name + ":8080/"
- zeppelin_notebook_access_url = "https://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
- zeppelin_ungit_access_url = "https://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private DNS: {}".format(dns_name))
- print("Private IP: {}".format(ip_address))
- print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(notebook_config['user_keyname']))
- print("AMI name: {}".format(notebook_config['notebook_image_name']))
- print("Profile name: {}".format(notebook_config['role_profile_name']))
- print("SG name: {}".format(notebook_config['security_group_name']))
- print("Zeppelin URL: {}".format(zeppelin_ip_url))
- print("Zeppelin URL: {}".format(zeppelin_dns_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
- format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": dns_name,
- "ip": ip_address,
- "instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "notebook_image_name": notebook_config['notebook_image_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Apache Zeppelin",
- "url": zeppelin_notebook_access_url},
- {"description": "Ungit",
- "url": zeppelin_ungit_access_url}#,
- #{"description": "Apache Zeppelin (via tunnel)",
- # "url": zeppelin_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": dns_name,
+ "ip": ip_address,
+ "instance_id": dlab.meta_lib.get_instance_by_name(notebook_config['tag_name'],
+ notebook_config['instance_name']),
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "notebook_image_name": notebook_config['notebook_image_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Apache Zeppelin",
+ "url": zeppelin_notebook_access_url},
+ {"description": "Ungit",
+ "url": zeppelin_ungit_access_url}#,
+ #{"description": "Apache Zeppelin (via tunnel)",
+ # "url": zeppelin_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ dlab.actions_lib.remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
index cb7073a..295e191 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py
@@ -82,7 +82,7 @@
args.security_group_name,
json.loads(args.tags),
args.public_ip_name)
- disk = AzureMeta().get_disk(args.resource_group_name, '{}-disk0'.format(
+ disk = AzureMeta().get_disk(args.resource_group_name, '{}-volume-primary'.format(
args.instance_name))
if disk:
create_option = 'attach'
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
index c3024c5..5746fb8 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_create_notebook_image.py
@@ -21,76 +21,82 @@
#
# ******************************************************************************
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import traceback
import sys
import json
+from fabric.api import *
if __name__ == "__main__":
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
image_conf = dict()
image_conf['service_base_name'] = os.environ['conf_service_base_name']
image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
- image_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- image_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- image_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
- image_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-')
- image_conf['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
+ image_conf['user_name'] = os.environ['edge_user_name']
+ image_conf['project_name'] = os.environ['project_name']
+ image_conf['project_tag'] = image_conf['project_name']
+ image_conf['endpoint_name'] = os.environ['endpoint_name']
+ image_conf['endpoint_tag'] = image_conf['endpoint_name']
image_conf['instance_name'] = os.environ['notebook_instance_name']
image_conf['application'] = os.environ['application']
image_conf['dlab_ssh_user'] = os.environ['conf_os_user']
- image_conf['image_name'] = os.environ['notebook_image_name'].lower().replace('_', '-')
- image_conf['full_image_name'] = '{}-{}-{}-{}'.format(image_conf['service_base_name'],
- image_conf['project_name'],
- image_conf['application'],
- image_conf['image_name']).lower()
- image_conf['tags'] = {"Name": image_conf['service_base_name'],
+ image_conf['image_name'] = os.environ['notebook_image_name']
+ image_conf['full_image_name'] = '{}-{}-{}-{}-{}'.format(image_conf['service_base_name'],
+ image_conf['project_name'],
+ image_conf['endpoint_name'],
+ image_conf['application'],
+ image_conf['image_name'])
+ image_conf['tags'] = {"Instance_Name": image_conf['instance_name'],
"SBN": image_conf['service_base_name'],
"User": image_conf['user_name'],
"project_tag": image_conf['project_tag'],
"endpoint_tag": image_conf['endpoint_tag'],
"Image": image_conf['image_name'],
- "FIN": image_conf['full_image_name'],
+ "Name": image_conf['full_image_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
- image_conf['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+ image_conf['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(image_conf['service_base_name'],
image_conf['project_name'],
image_conf['endpoint_name'])
- edge_instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
+ edge_instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
edge_instance_name)
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- instance = AzureMeta().get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+ instance = AzureMeta.get_instance(image_conf['resource_group_name'], image_conf['instance_name'])
os.environ['azure_notebook_instance_size'] = instance.hardware_profile.vm_size
os.environ['exploratory_name'] = instance.tags['Exploratory']
os.environ['notebook_image_name'] = image_conf['image_name']
- image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+ image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
if image == '':
print('Creating image from existing notebook.')
- prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(image_conf['resource_group_name'],
- image_conf['instance_name'],
- os.environ['azure_region'],
- image_conf['full_image_name'],
- json.dumps(image_conf['tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+ AzureActions.create_image_from_instance(image_conf['resource_group_name'],
+ image_conf['instance_name'],
+ os.environ['azure_region'],
+ image_conf['full_image_name'],
+ json.dumps(image_conf['tags']))
print("Image was successfully created.")
try:
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(image_conf['resource_group_name'],
- image_conf['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(image_conf['resource_group_name'],
+ image_conf['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(image_conf['resource_group_name'],
- image_conf['instance_name'])
- remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(image_conf['resource_group_name'],
+ image_conf['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, image_conf['dlab_ssh_user'], instance_hostname, keyfile_name)
+ dlab.fab.set_git_proxy(image_conf['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_hostname))
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, image_conf['instance_name'], keyfile_name,
@@ -98,9 +104,8 @@
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
print("Image was successfully created. It's name is {}".format(image_conf['full_image_name']))
except Exception as err:
- print('Error: {0}'.format(err))
- AzureActions().remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
- append_result("Failed to create instance from image.", str(err))
+ AzureActions.remove_instance(image_conf['resource_group_name'], image_conf['instance_name'])
+ dlab.fab.append_result("Failed to create instance from image.", str(err))
sys.exit(1)
with open("/root/result.json", 'w') as result:
@@ -114,6 +119,5 @@
"Action": "Create image from notebook"}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create image from notebook", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to create image from notebook", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
index cfe37fc..2a9e606 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_download_git_certfile.py
@@ -35,9 +35,9 @@
args = parser.parse_args()
resource_group_name = os.environ['azure_resource_group_name']
-ssn_storage_account_tag = ('{0}-{1}-{2}-storage'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
- os.environ['endpoint_name']))
-container_name = ('{}-ssn-container'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
+ssn_storage_account_tag = ('{0}-{1}-{2}-bucket'.format(os.environ['conf_service_base_name'], os.environ['project_name'],
+ os.environ['endpoint_name']))
+container_name = ('{}-ssn-bucket'.format(os.environ['conf_service_base_name'])).lower().replace('_', '-')
gitlab_certfile = os.environ['conf_gitlab_certfile']
if __name__ == "__main__":
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
index b755c91..2e697eb 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import uuid
+from fabric.api import *
+import traceback
+
+
+def clear_resources():
+ for i in range(notebook_config['instance_count'] - 1):
+ slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+ AzureActions.remove_instance(notebook_config['resource_group_name'], slave_name)
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
if __name__ == "__main__":
@@ -41,48 +50,50 @@
try:
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ else:
notebook_config['exploratory_name'] = ''
- try:
- notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ notebook_config['computational_name'] = os.environ['computational_name']
+ else:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
- notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
- notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
- '-de-' + notebook_config['exploratory_name'] + '-' + \
- notebook_config['computational_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['cluster_name'] = '{}-{}-{}-de-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['computational_name'])
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
- notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+ notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
- notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
+ notebook_config['spark_master_ip'] = AzureMeta.get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
- notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+ notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to get instance IP address", str(err))
+ clear_resources()
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
- append_result("Failed to generate infrastructure names", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
@@ -100,12 +111,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
- append_result("Failed installing Dataengine kernels.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
@@ -125,12 +132,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
- append_result("Failed to configure Spark.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
@@ -139,6 +142,7 @@
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
index d5be204..a4dda9d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_prepare_notebook.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
import os
from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
+from fabric.api import *
if __name__ == "__main__":
@@ -42,17 +44,19 @@
# generating variables dictionary
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['application'] = os.environ['application'].lower().replace('_', '-')
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['application'] = os.environ['application'].lower()
print('Generating infrastructure names and tags')
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -62,8 +66,9 @@
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
@@ -73,9 +78,11 @@
"product": "dlab"}
notebook_config['network_interface_name'] = notebook_config['instance_name'] + "-nif"
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
- notebook_config['private_subnet_name'] = '{}-{}-subnet'.format(notebook_config['service_base_name'],
- notebook_config['project_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ notebook_config['private_subnet_name'] = '{}-{}-{}-subnet'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
ssh_key_path = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
key = RSA.importKey(open(ssh_key_path, 'rb').read())
notebook_config['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -83,33 +90,32 @@
notebook_config['instance_storage_account_type'] = (lambda x: 'Standard_LRS' if x in ('deeplearning', 'tensor')
else 'Premium_LRS')(os.environ['application'])
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
notebook_config['image_type'] = 'default'
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
- notebook_config['project_name'],
- notebook_config['application'])
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['application'])
else:
notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
- notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
- notebook_config['application'])
- notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}'.format(notebook_config['service_base_name'],
- os.environ['project_name'],
- os.environ['application'],
- os.environ['notebook_image_name']).lower().replace('_', '-') if (x != 'None' and x != '')
+ notebook_config['service_base_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['application'])
+ notebook_config['notebook_image_name'] = (lambda x: '{0}-{1}-{2}-{3}-{4}'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'], os.environ['notebook_image_name']).replace('_', '-') if (x != 'None' and x != '')
else notebook_config['expected_image_name'])(str(os.environ.get('notebook_image_name')))
print('Searching pre-configured images')
notebook_config['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
- if AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
+ if AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['notebook_image_name']):
notebook_config['image_name'] = notebook_config['notebook_image_name']
notebook_config['image_type'] = 'pre-configured'
print('Pre-configured image found. Using: {}'.format(notebook_config['notebook_image_name']))
@@ -118,27 +124,26 @@
print('No pre-configured image found. Using default one: {}'.format(notebook_config['image_name']))
except Exception as err:
print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary.", str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
- edge_status = AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
- notebook_config['project_name'],
- notebook_config['endpoint_name']))
+ edge_status = AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name']))
if edge_status != 'running':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
+ ssn_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
os.environ['conf_service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
- ssn_hostname)
- append_result("Edge node is unavailable")
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
sys.exit(1)
except Exception as err:
- print("Failed to verify edge status.")
- append_result("Failed to verify edge status.", str(err))
+ dlab.fab.append_result("Failed to verify edge status.", str(err))
sys.exit(1)
with open('/root/result.json', 'w') as f:
@@ -157,20 +162,20 @@
format(notebook_config['instance_name'], notebook_config['instance_size'], notebook_config['region'],
notebook_config['vpc_name'], notebook_config['network_interface_name'],
notebook_config['security_group_name'], notebook_config['private_subnet_name'],
- notebook_config['service_base_name'], notebook_config['resource_group_name'], initial_user,
- 'None', notebook_config['public_ssh_key'], notebook_config['primary_disk_size'], 'notebook',
- notebook_config['project_name'], notebook_config['instance_storage_account_type'],
- notebook_config['image_name'], notebook_config['image_type'], json.dumps(notebook_config['tags']))
+ notebook_config['service_base_name'], notebook_config['resource_group_name'],
+ notebook_config['initial_user'], 'None', notebook_config['public_ssh_key'],
+ notebook_config['primary_disk_size'], 'notebook', notebook_config['project_name'],
+ notebook_config['instance_storage_account_type'], notebook_config['image_name'],
+ notebook_config['image_type'], json.dumps(notebook_config['tags']))
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
except:
print("The instance hasn't been created.")
- append_result("Failed to create instance.", str(err))
+ dlab.fab.append_result("Failed to create instance.", str(err))
sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
index 5dcbf3e..ab3c080 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_start_notebook.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
import argparse
+from fabric.api import *
if __name__ == "__main__":
@@ -40,6 +42,8 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
@@ -51,37 +55,37 @@
print('[START NOTEBOOK]')
try:
print("Starting notebook")
- AzureActions().start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
+ AzureActions.start_instance(notebook_config['resource_group_name'], notebook_config['notebook_name'])
print("Instance {} has been started".format(notebook_config['notebook_name']))
- except Exception as err:
+ except:
traceback.print_exc()
- append_result("Failed to start notebook.", str(err))
raise Exception
- except:
+ except Exception as err:
+ dlab.fab.append_result("Failed to start notebook.", str(err))
sys.exit(1)
try:
logging.info('[SETUP USER GIT CREDENTIALS]')
print('[SETUP USER GIT CREDENTIALS]')
- notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+ notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
- except Exception as err:
+ except:
traceback.print_exc()
- append_result("Failed to setup git credentials.", str(err))
raise Exception
- except:
+ except Exception as err:
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
sys.exit(1)
if os.environ['azure_datalake_enable'] == 'true':
try:
logging.info('[UPDATE STORAGE CREDENTIALS]')
print('[UPDATE STORAGE CREDENTIALS]')
- notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
+ notebook_config['notebook_ip'] = AzureMeta.get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
env.hosts = "{}".format(notebook_config['notebook_ip'])
env.user = os.environ['conf_os_user']
@@ -90,13 +94,14 @@
params = '--refresh_token {}'.format(os.environ['azure_user_refresh_token'])
try:
put('~/scripts/common_notebook_update_refresh_token.py', '/tmp/common_notebook_update_refresh_token.py')
- sudo('mv /tmp/common_notebook_update_refresh_token.py /usr/local/bin/common_notebook_update_refresh_token.py')
+ sudo('mv /tmp/common_notebook_update_refresh_token.py '
+ '/usr/local/bin/common_notebook_update_refresh_token.py')
sudo("/usr/bin/python /usr/local/bin/{}.py {}".format('common_notebook_update_refresh_token', params))
- except Exception as err:
+ except:
traceback.print_exc()
- append_result("Failed to update storage credentials.", str(err))
raise Exception
- except:
+ except Exception as err:
+ dlab.fab.append_result("Failed to update storage credentials.", str(err))
sys.exit(1)
try:
@@ -106,16 +111,16 @@
.format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
try:
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
- except Exception as err:
+ except:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
raise Exception
- except:
+ except Exception as err:
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
sys.exit(1)
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['notebook_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['notebook_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['notebook_name']))
@@ -126,8 +131,8 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
index 4c4ba17..5e77666 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_stop_notebook.py
@@ -24,9 +24,9 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import uuid
import argparse
@@ -37,26 +37,26 @@
print("Stopping data engine cluster")
cluster_list = []
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "notebook_name" in vm.tags:
if notebook_name == vm.tags['notebook_name']:
if 'master' == vm.tags["Type"]:
cluster_list.append(vm.tags["Name"])
- AzureActions().stop_instance(resource_group_name, vm.name)
+ AzureActions.stop_instance(resource_group_name, vm.name)
print("Instance {} has been stopped".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to stop clusters", str(err))
sys.exit(1)
print("Stopping notebook")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "Name" in vm.tags:
if notebook_name == vm.tags["Name"]:
- AzureActions().stop_instance(resource_group_name, vm.name)
+ AzureActions.stop_instance(resource_group_name, vm.name)
print("Instance {} has been stopped".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to stop instance", str(err))
sys.exit(1)
@@ -69,15 +69,17 @@
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ else:
notebook_config['exploratory_name'] = ''
- try:
- notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ notebook_config['computational_name'] = os.environ['computational_name']
+ else:
notebook_config['computational_name'] = ''
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -87,18 +89,15 @@
try:
stop_notebook(notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to stop notebook.", str(err))
+ dlab.fab.append_result("Failed to stop notebook.", str(err))
sys.exit(1)
-
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
index e08130d..73eab17 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook.py
@@ -24,34 +24,35 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import uuid
+import traceback
def terminate_nb(resource_group_name, notebook_name):
print("Terminating data engine cluster")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "notebook_name" in vm.tags:
if notebook_name == vm.tags['notebook_name']:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate clusters", str(err))
sys.exit(1)
print("Terminating notebook")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "Name" in vm.tags:
if notebook_name == vm.tags["Name"]:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate instance", str(err))
sys.exit(1)
@@ -63,15 +64,17 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
+ else:
notebook_config['exploratory_name'] = ''
- try:
- notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ notebook_config['computational_name'] = os.environ['computational_name']
+ else:
notebook_config['computational_name'] = ''
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
@@ -83,7 +86,7 @@
terminate_nb(notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate notebook.", str(err))
+ dlab.fab.append_result("Failed to terminate notebook.", str(err))
raise Exception
except:
sys.exit(1)
@@ -94,6 +97,6 @@
"Action": "Terminate notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
index ff186ac..c74abfe 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/common_terminate_notebook_image.py
@@ -21,23 +21,26 @@
#
# ******************************************************************************
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-from dlab.fab import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import sys
import json
+import os
if __name__ == "__main__":
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
image_conf = dict()
image_conf['service_base_name'] = os.environ['conf_service_base_name']
image_conf['resource_group_name'] = os.environ['azure_resource_group_name']
image_conf['full_image_name'] = os.environ['notebook_image_name']
- image = AzureMeta().get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+ image = AzureMeta.get_image(image_conf['resource_group_name'], image_conf['full_image_name'])
if image != '':
- AzureActions().remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
+ AzureActions.remove_image(image_conf['resource_group_name'], image_conf['full_image_name'])
with open("/root/result.json", 'w') as result:
res = {"notebook_image_name": image_conf['full_image_name'],
@@ -45,6 +48,5 @@
"Action": "Delete existing notebook image"}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to delete existing notebook image", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to delete existing notebook image", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
index a31cfeb..8d90b5e 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_configure.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -37,7 +38,7 @@
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
- slave_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], slave_name)
+ slave_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'], slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to create ssh user on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE]')
logging.info('[INSTALLING USERs KEY ON SLAVE]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
@@ -72,13 +69,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'],
- data_engine['master_node_name'])
- append_result("Failed to install user ssh key on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install user ssh key on slave.", str(err))
sys.exit(1)
try:
@@ -92,12 +84,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to clean slave instance..", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to clean slave instance..", str(err))
sys.exit(1)
try:
@@ -113,12 +101,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to configure proxy on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
@@ -133,13 +117,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to install prerequisites on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
@@ -157,16 +136,18 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed configuring slave node", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to configure slave node.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
+def clear_resources():
+ for i in range(data_engine['instance_count'] - 1):
+ slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+ AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
+ AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
@@ -176,38 +157,41 @@
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name']
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name']
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
data_engine['region'] = os.environ['azure_region']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['vpc_name'] = os.environ['azure_vpc_name']
- data_engine['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- data_engine['endpoint_tag'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
- data_engine['vpc_name'],
- data_engine['private_subnet_name']).address_prefix
- data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(
- data_engine['service_base_name'], data_engine['project_name'])
- data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['project_tag'] = data_engine['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['endpoint_tag'] = data_engine['endpoint_name']
+ data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
+ data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+ data_engine['vpc_name'],
+ data_engine['private_subnet_name']).address_prefix
+ data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
- data_engine['exploratory_name'],
+ data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
@@ -217,19 +201,20 @@
data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
data_engine['notebook_name'] = os.environ['notebook_instance_name']
- master_node_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- data_engine['master_node_name'])
+ master_node_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ edge_instance_name)
+ data_engine['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ data_engine['region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(data_engine['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = data_engine['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
key = RSA.importKey(open(keyfile_name, 'rb').read())
data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
@@ -240,13 +225,8 @@
initial_user = 'ec2-user'
sudo_group = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
@@ -262,18 +242,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to create ssh user on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER]')
logging.info('[INSTALLING USERs KEY ON MASTER]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -284,12 +260,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to install ssh user key on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install ssh user key on master.", str(err))
sys.exit(1)
@@ -304,12 +276,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to clean master instance.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to clean master instance.", str(err))
sys.exit(1)
try:
@@ -325,12 +293,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to configure proxy on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
@@ -345,13 +309,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to install prerequisites on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
@@ -369,12 +328,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure master node", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+ dlab.fab.append_result("Failed to configure master node", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -389,18 +344,15 @@
if job.exitcode != 0:
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+ dlab.fab.append_result("Failed to configure slave nodes", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
- notebook_instance_ip = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- data_engine['notebook_name'])
+ notebook_instance_ip = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ data_engine['notebook_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
@@ -425,19 +377,16 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+ dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+ clear_resources()
sys.exit(1)
try:
- ip_address = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- data_engine['master_node_name'])
+ ip_address = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ data_engine['master_node_name'])
spark_master_url = "http://" + ip_address + ":8080"
spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
@@ -463,6 +412,7 @@
}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
index 24855fa..86dc7a9 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_prepare.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -43,44 +44,48 @@
level=logging.INFO,
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
data_engine = dict()
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- data_engine['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['project_tag'] = data_engine['project_name']
+ data_engine['endpoint_tag'] = data_engine['endpoint_name']
print('Generating infrastructure names and tags')
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name']
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name']
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
data_engine['region'] = os.environ['azure_region']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['vpc_name'] = os.environ['azure_vpc_name']
- data_engine['private_subnet_name'] = '{}-{}-subnet'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['private_subnet_cidr'] = AzureMeta().get_subnet(data_engine['resource_group_name'],
- data_engine['vpc_name'],
- data_engine['private_subnet_name']).address_prefix
- data_engine['master_security_group_name'] = '{}-{}-dataengine-master-sg'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['slave_security_group_name'] = '{}-{}-dataengine-slave-sg'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+ data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
+ data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
+ data_engine['vpc_name'],
+ data_engine['private_subnet_name']).address_prefix
+ data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
+ data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
- data_engine['exploratory_name'],
+ data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
data_engine['master_network_interface_name'] = '{}-nif'.format(data_engine['master_node_name'])
data_engine['master_size'] = os.environ['azure_dataengine_master_size']
- key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+ key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+ os.environ['conf_key_name']), 'rb').read())
data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
@@ -106,20 +111,19 @@
data_engine['image_type'] = 'default'
if os.environ['conf_shared_image_enabled'] == 'false':
- data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(data_engine['service_base_name'],
- os.environ['endpoint_name'],
- os.environ['project_name'],
- os.environ['application'])
+ data_engine['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'],
+ os.environ['application'])
else:
data_engine['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(data_engine['service_base_name'],
- os.environ['endpoint_name'],
+ data_engine['endpoint_name'],
os.environ['application'])
data_engine['notebook_image_name'] = (lambda x: os.environ['notebook_image_name'] if x != 'None'
else data_engine['expected_image_name'])(str(os.environ.get('notebook_image_name')))
print('Searching pre-configured images')
- if AzureMeta().get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
+ if AzureMeta.get_image(data_engine['resource_group_name'], data_engine['notebook_image_name']) and \
os.environ['application'] in os.environ['dataengine_image_notebooks'].split(','):
data_engine['image_name'] = data_engine['notebook_image_name']
data_engine['image_type'] = 'pre-configured'
@@ -128,26 +132,25 @@
data_engine['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
print('No pre-configured image found. Using default one: {}'.format(data_engine['image_name']))
except Exception as err:
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary. Exception:" + str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
sys.exit(1)
try:
- edge_status = AzureMeta().get_instance_status(data_engine['resource_group_name'], '{0}-{1}-{2}-edge'.format(os.environ['conf_service_base_name'],
- data_engine['project_name'],
- data_engine['endpoint_name']))
+ edge_status = AzureMeta.get_instance_status(data_engine['resource_group_name'],
+ '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name']))
if edge_status != 'running':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = AzureMeta().get_private_ip_address(data_engine['resource_group_name'],
- os.environ['conf_service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
- ssn_hostname)
- append_result("Edge node is unavailable")
+ ssn_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ data_engine['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
sys.exit(1)
except Exception as err:
- print("Failed to verify edge status.")
- append_result("Failed to verify edge status.", str(err))
+ dlab.fab.append_result("Failed to verify edge status.", str(err))
sys.exit(1)
if os.environ['conf_os_family'] == 'debian':
@@ -182,12 +185,11 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+ AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
except:
print("The instance hasn't been created.")
- append_result("Failed to create master instance.", str(err))
+ dlab.fab.append_result("Failed to create master instance.", str(err))
sys.exit(1)
try:
@@ -217,13 +219,12 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
try:
- AzureActions().remove_instance(data_engine['resource_group_name'], slave_name)
+ AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
except:
print("The slave instance {} hasn't been created.".format(slave_name))
- AzureActions().remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
- append_result("Failed to create slave instances.", str(err))
+ AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
+ dlab.fab.append_result("Failed to create slave instances.", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
index cf2a613..308912f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_start.py
@@ -24,23 +24,25 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+import traceback
import uuid
+from fabric.api import *
def start_data_engine(resource_group_name, cluster_name):
print("Starting data engine cluster")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "Name" in vm.tags:
if cluster_name == vm.tags["Name"]:
- AzureActions().start_instance(resource_group_name, vm.name)
+ AzureActions.start_instance(resource_group_name, vm.name)
print("Instance {} has been started".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to start dataengine", str(err))
sys.exit(1)
@@ -52,23 +54,26 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name']
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name']
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
- data_engine['exploratory_name'],
+ data_engine['endpoint_name'],
data_engine['computational_name'])
try:
logging.info('[STARTING DATA ENGINE]')
@@ -86,8 +91,10 @@
logging.info('[UPDATE LAST ACTIVITY TIME]')
print('[UPDATE LAST ACTIVITY TIME]')
data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
- data_engine['notebook_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], os.environ['notebook_instance_name'])
- data_engine['computational_ip'] = AzureMeta().get_private_ip_address(data_engine['resource_group_name'], data_engine['computational_id'])
+ data_engine['notebook_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ os.environ['notebook_instance_name'])
+ data_engine['computational_ip'] = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
+ data_engine['computational_id'])
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
.format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -96,18 +103,17 @@
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
raise Exception
except:
sys.exit(1)
-
try:
with open("/root/result.json", 'w') as result:
res = {"service_base_name": data_engine['service_base_name'],
"Action": "Start Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
index ef1521f..963c555 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_stop.py
@@ -24,23 +24,24 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+import traceback
import uuid
def stop_data_engine(resource_group_name, cluster_name):
print("Stopping data engine cluster")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "Name" in vm.tags:
if cluster_name == vm.tags["Name"]:
- AzureActions().stop_instance(resource_group_name, vm.name)
+ AzureActions.stop_instance(resource_group_name, vm.name)
print("Instance {} has been stopped".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to stop dataengine", str(err))
sys.exit(1)
@@ -52,23 +53,26 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name']
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name']
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
- data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+ data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
- data_engine['exploratory_name'],
+ data_engine['endpoint_name'],
data_engine['computational_name'])
try:
logging.info('[STOPPING DATA ENGINE]')
@@ -77,7 +81,7 @@
stop_data_engine(data_engine['resource_group_name'], data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to stop Data Engine.", str(err))
+ dlab.fab.append_result("Failed to stop Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -88,6 +92,6 @@
"Action": "Stop Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
index 974fc3e..1363eb8 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/dataengine_terminate.py
@@ -24,30 +24,31 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+import traceback
import uuid
def terminate_data_engine(resource_group_name, notebook_name, os_user, key_path, cluster_name):
print("Terminating data engine cluster")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if "Name" in vm.tags:
if cluster_name == vm.tags["Name"]:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataengine", str(err))
sys.exit(1)
print("Removing Data Engine kernels from notebook")
try:
- AzureActions().remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
+ AzureActions.remove_dataengine_kernels(resource_group_name, notebook_name, os_user, key_path, cluster_name)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
sys.exit(1)
@@ -59,23 +60,26 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name']
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name']
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['cluster_name'] = '{}-{}-de-{}-{}'.format(data_engine['service_base_name'],
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name']
+ data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
- data_engine['exploratory_name'],
+ data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
data_engine['key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
@@ -89,7 +93,7 @@
os.environ['conf_os_user'], data_engine['key_path'], data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate Data Engine.", str(err))
+ dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -100,6 +104,6 @@
"Action": "Terminate Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
index 3a64bf6..9c8f24c 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/deeplearning_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -39,33 +41,36 @@
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['user_keyname'] = os.environ['project_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
notebook_config['project_name'],
+ notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"project_tag": notebook_config['project_tag'],
@@ -77,7 +82,7 @@
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"endpoint_tag": notebook_config['endpoint_tag'],
@@ -85,7 +90,8 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -94,45 +100,45 @@
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate variables dictionary", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -140,9 +146,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab-user'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -151,16 +156,16 @@
print('[CONFIGURE PROXY ON DEEP LEARNING INSTANCE]')
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
- .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -173,12 +178,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -194,9 +198,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -206,7 +209,7 @@
"--os_user {2} --jupyter_version {3} " \
"--scala_version {4} --spark_version {5} " \
"--hadoop_version {6} --region {7} " \
- "--r_mirror {8} --ip_adress {9} --exploratory_name {10} --edge_ip {11}" \
+ "--r_mirror {8} --ip_address {9} --exploratory_name {10} --edge_ip {11}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
os.environ['notebook_jupyter_version'], os.environ['notebook_scala_version'],
os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
@@ -214,15 +217,14 @@
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_deep_learning_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
- os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure Deep Learning node.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -233,12 +235,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -254,44 +255,44 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
- notebook_config['instance_name'],
- os.environ['azure_region'],
- notebook_config['expected_image_name'],
- json.dumps(notebook_config['image_tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+ notebook_config['instance_name'],
+ os.environ['azure_region'],
+ notebook_config['expected_image_name'],
+ json.dumps(notebook_config['image_tags']))
print("Image was successfully created.")
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -316,17 +317,16 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
tensorboard_ip_url = 'http://' + ip_address + ':6006'
jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
@@ -373,7 +373,6 @@
]}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate output information.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate output information.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
index fd2f940..997d38f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
@@ -22,10 +22,16 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
+import uuid
+from fabric.api import *
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -34,112 +40,121 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
+
+ def clear_resources():
+ AzureActions.remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+ AzureActions.remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
+ edge_conf['private_subnet_name'])
+ AzureActions.remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(edge_conf['resource_group_name'],
+ edge_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(edge_conf['resource_group_name'],
+ edge_conf['master_security_group_name'])
+ AzureActions.remove_security_group(edge_conf['resource_group_name'],
+ edge_conf['slave_security_group_name'])
+ for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
+ if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
+ AzureActions.remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
+ if os.environ['azure_datalake_enable'] == 'true':
+ for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
+ if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
+ AzureActions.remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+
try:
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
edge_conf = dict()
-
- edge_conf['service_base_name'] = os.environ['conf_service_base_name']
+ edge_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
edge_conf['key_name'] = os.environ['conf_key_name']
edge_conf['vpc_name'] = os.environ['azure_vpc_name']
edge_conf['region'] = os.environ['azure_region']
edge_conf['subnet_name'] = os.environ['azure_subnet_name']
- edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- edge_conf['user_keyname'] = os.environ['project_name']
- edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-subnet'
+ edge_conf['project_name'] = (os.environ['project_name'])
+ edge_conf['endpoint_name'] = (os.environ['endpoint_name'])
+ edge_conf['user_keyname'] = edge_conf['project_name']
+ edge_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
- edge_conf['network_interface_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
- '-edge-nif'
- edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + \
- '-edge-ip'
- edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
- edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + edge_conf['region'] + \
- '.cloudapp.azure.com'
- edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
- edge_conf['project_name'],
- edge_conf['endpoint_name'])
- edge_conf['user_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-' + edge_conf['endpoint_name'] +
- '-container').lower()
- edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(edge_conf['service_base_name'],
- edge_conf['endpoint_name'])
- edge_conf['shared_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['endpoint_name'] + '-shared-container').lower()
- edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake'
- edge_conf['datalake_shared_directory_name'] = edge_conf['service_base_name'] + '-shared-folder'
- edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'],
- edge_conf['project_name'])
- edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
- edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + "-" + os.environ['endpoint_name'] +\
- '-nb-sg'
- edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
- + edge_conf['project_name'] + '-dataengine-master-sg'
- edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
- + edge_conf['project_name'] + '-dataengine-slave-sg'
+ edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+ edge_conf['region'])
+ edge_conf['user_storage_account_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name']).lower()
+ edge_conf['user_container_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name']).lower()
+ edge_conf['shared_storage_account_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['endpoint_name']).lower()
+ edge_conf['shared_container_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['endpoint_name']).lower()
+ edge_conf['datalake_store_name'] = '{}-ssn-datalake'.format(edge_conf['service_base_name'])
+ edge_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(edge_conf['service_base_name'])
+ edge_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['edge_security_group_name'] = '{}-sg'.format(edge_conf['instance_name'])
+ edge_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
- keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
- edge_conf['private_subnet_cidr'] = AzureMeta().get_subnet(edge_conf['resource_group_name'],
- edge_conf['vpc_name'],
- edge_conf['private_subnet_name']).address_prefix
+ edge_conf['keyfile_name'] = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name'])
+ edge_conf['private_subnet_cidr'] = AzureMeta.get_subnet(edge_conf['resource_group_name'],
+ edge_conf['vpc_name'],
+ edge_conf['private_subnet_name']).address_prefix
if os.environ['conf_network_type'] == 'private':
- edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
- edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+ edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name'])
+ edge_conf['edge_public_ip'] = edge_conf['edge_private_ip']
+ edge_conf['instance_hostname'] = edge_conf['edge_private_ip']
else:
- edge_conf['edge_public_ip'] = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
- edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
- instance_hostname = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
- edge_conf['vpc_cidrs'] = AzureMeta().get_vpc(edge_conf['resource_group_name'],
- edge_conf['vpc_name']).address_space.address_prefixes
+ edge_conf['edge_public_ip'] = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name'])
+ edge_conf['edge_private_ip'] = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name'])
+ edge_conf['instance_hostname'] = edge_conf['instance_dns_name']
+ edge_conf['vpc_cidrs'] = AzureMeta.get_vpc(edge_conf['resource_group_name'],
+ edge_conf['vpc_name']).address_space.address_prefixes
if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} --san {1} '.format(AzureMeta().get_private_ip_address(
- edge_conf['resource_group_name'], edge_conf['instance_name']), edge_conf['instance_dns_name'])
+ edge_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+ edge_conf['resource_group_name'], edge_conf['instance_name']))
if os.environ['conf_network_type'] == 'public':
- step_cert_sans += ' --san {0}'.format(
- AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name']))
+ edge_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+ AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name']),
+ edge_conf['instance_dns_name'])
else:
- step_cert_sans = ''
+ edge_conf['step_cert_sans'] = ''
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate infrastructure names", str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+ clear_resources()
sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ edge_conf['initial_user'] = 'ubuntu'
+ edge_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ edge_conf['initial_user'] = 'ec2-user'
+ edge_conf['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- edge_conf['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ edge_conf['instance_hostname'], os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -147,57 +162,24 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING PREREQUISITES]')
logging.info('[INSTALLING PREREQUISITES]')
- params = "--hostname {} --keyfile {} --user {} --region {}".\
- format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['azure_region'])
+ params = "--hostname {} --keyfile {} --user {} --region {}".format(
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+ os.environ['azure_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -205,40 +187,24 @@
logging.info('[INSTALLING HTTP PROXY]')
additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
"template_file": "/root/templates/squid.conf",
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"ldap_host": os.environ['ldap_hostname'],
"ldap_dn": os.environ['ldap_dn'],
"ldap_user": os.environ['ldap_service_username'],
"ldap_password": os.environ['ldap_service_password'],
"vpc_cidrs": edge_conf['vpc_cidrs'],
"allowed_ip_cidr": ['0.0.0.0/0']}
- params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
- .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+ params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+ edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('configure_http_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing http proxy.", str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+ dlab.fab.append_result("Failed installing http proxy.", str(err))
+ clear_resources()
sys.exit(1)
@@ -248,43 +214,27 @@
additional_config = {"user_keyname": edge_conf['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], json.dumps(additional_config),
+ edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key. Excpeption: " + str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing users key. Exception: " + str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING NGINX REVERSE PROXY]')
logging.info('[INSTALLING NGINX REVERSE PROXY]')
- keycloak_client_secret = str(uuid.uuid4())
+ edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
- "--step_cert_sans '{}'" \
- .format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'],
- edge_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret,
- step_cert_sans)
+ "--step_cert_sans '{}'".format(
+ edge_conf['instance_hostname'], edge_conf['keyfile_name'], edge_conf['dlab_ssh_user'],
+ edge_conf['service_base_name'] + '-' + edge_conf['project_name'] + '-' + edge_conf['endpoint_name'],
+ edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
try:
local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
@@ -293,44 +243,28 @@
raise Exception
keycloak_params = "--service_base_name {} --keycloak_auth_server_url {} --keycloak_realm_name {} " \
"--keycloak_user {} --keycloak_user_password {} --keycloak_client_secret {} " \
- "--edge_public_ip {} --project_name {} --endpoint_name {} " \
- .format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
- os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
- os.environ['keycloak_user_password'],
- keycloak_client_secret, edge_conf['edge_public_ip'], os.environ['project_name'], os.environ['endpoint_name'])
+ "--edge_public_ip {} --project_name {} --endpoint_name {} ".format(
+ edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
+ os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
+ os.environ['keycloak_user_password'],
+ edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
try:
local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing Nginx reverse proxy. Excpeption: " + str(err))
- AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
- AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'],
- edge_conf['private_subnet_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['master_security_group_name'])
- AzureActions().remove_security_group(edge_conf['resource_group_name'],
- edge_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
- if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name)
- if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
- if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name'])
+        dlab.fab.append_result("Failed installing Nginx reverse proxy. Exception: " + str(err))
+ clear_resources()
sys.exit(1)
try:
- for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']):
+ for storage_account in AzureMeta.list_storage_accounts(edge_conf['resource_group_name']):
if edge_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
- shared_storage_account_name = storage_account.name
+ edge_conf['shared_storage_account_name'] = storage_account.name
if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]:
- user_storage_account_name = storage_account.name
+ edge_conf['user_storage_account_name'] = storage_account.name
print('[SUMMARY]')
logging.info('[SUMMARY]')
@@ -339,13 +273,13 @@
print("Public IP: {}".format(edge_conf['edge_public_ip']))
print("Private IP: {}".format(edge_conf['edge_private_ip']))
print("Key name: {}".format(edge_conf['key_name']))
- print("User storage account name: {}".format(user_storage_account_name))
+ print("User storage account name: {}".format(edge_conf['user_storage_account_name']))
print("User container name: {}".format(edge_conf['user_container_name']))
if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']):
+ for datalake in AzureMeta.list_datalakes(edge_conf['resource_group_name']):
if edge_conf['datalake_store_name'] == datalake.tags["Name"]:
- datalake_id = datalake.name
- print("Data Lake name: {}".format(datalake_id))
+ edge_conf['datalake_id'] = datalake.name
+ print("Data Lake name: {}".format(edge_conf['datalake_id']))
print("Data Lake tag name: {}".format(edge_conf['datalake_store_name']))
print("Data Lake Store user directory name: {}".format(edge_conf['datalake_user_directory_name']))
print("Notebook SG: {}".format(edge_conf['notebook_security_group_name']))
@@ -357,9 +291,9 @@
"public_ip": edge_conf['edge_public_ip'],
"ip": edge_conf['edge_private_ip'],
"key_name": edge_conf['key_name'],
- "user_storage_account_name": user_storage_account_name,
+ "user_storage_account_name": edge_conf['user_storage_account_name'],
"user_container_name": edge_conf['user_container_name'],
- "shared_storage_account_name": shared_storage_account_name,
+ "shared_storage_account_name": edge_conf['shared_storage_account_name'],
"shared_container_name": edge_conf['shared_container_name'],
"user_storage_account_tag_name": edge_conf['user_storage_account_name'],
"tunnel_port": "22",
@@ -369,7 +303,7 @@
"notebook_subnet": edge_conf['private_subnet_cidr'],
"instance_id": edge_conf['instance_name'],
"full_edge_conf": edge_conf,
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
"Action": "Create new EDGE server"}
else:
@@ -377,12 +311,12 @@
"public_ip": edge_conf['edge_public_ip'],
"ip": edge_conf['edge_private_ip'],
"key_name": edge_conf['key_name'],
- "user_storage_account_name": user_storage_account_name,
+ "user_storage_account_name": edge_conf['user_storage_account_name'],
"user_container_name": edge_conf['user_container_name'],
- "shared_storage_account_name": shared_storage_account_name,
+ "shared_storage_account_name": edge_conf['shared_storage_account_name'],
"shared_container_name": edge_conf['shared_container_name'],
"user_storage_account_tag_name": edge_conf['user_storage_account_name'],
- "datalake_name": datalake_id,
+ "datalake_name": edge_conf['datalake_id'],
"datalake_tag_name": edge_conf['datalake_store_name'],
"datalake_shared_directory_name": edge_conf['datalake_shared_directory_name'],
"datalake_user_directory_name": edge_conf['datalake_user_directory_name'],
@@ -393,11 +327,12 @@
"notebook_subnet": edge_conf['private_subnet_cidr'],
"instance_id": edge_conf['instance_name'],
"full_edge_conf": edge_conf,
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"@class": "com.epam.dlab.dto.azure.edge.EdgeInfoAzure",
"Action": "Create new EDGE server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
index d4bda85..9dc1b01 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_prepare.py
@@ -59,14 +59,14 @@
edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
edge_conf['instance_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge'
- edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0'
+ edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-volume-primary'
edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg'
edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + "-" + os.environ['endpoint_name']\
+ '-nb-sg'
edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \
- + edge_conf['user_name'] + '-dataengine-master-sg'
+ + edge_conf['user_name'] + '-de-master-sg'
edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \
- + edge_conf['user_name'] + '-dataengine-slave-sg'
+ + edge_conf['user_name'] + '-de-slave-sg'
edge_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-storage'.format(edge_conf['service_base_name'],
edge_conf['user_name'],
edge_conf['endpoint_name']))
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
index 445f48d..04e57ae 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_start.py
@@ -21,9 +21,13 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
import sys
+import json
if __name__ == "__main__":
@@ -35,29 +39,31 @@
filename=local_log_filepath)
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
edge_conf = dict()
edge_conf['service_base_name'] = os.environ['conf_service_base_name']
edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
- edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+ edge_conf['project_name'] = os.environ['project_name']
+ edge_conf['endpoint_name'] = os.environ['endpoint_name']
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
- edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + os.environ['azure_region'] + '.cloudapp.azure.com'
+ edge_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_conf['instance_name'],
+ os.environ['azure_region'])
logging.info('[START EDGE]')
print('[START EDGE]')
try:
- AzureActions().start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+ AzureActions.start_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start edge.", str(err))
+ dlab.fab.append_result("Failed to start edge.", str(err))
sys.exit(1)
try:
- public_ip_address = AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
- private_ip_address = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'],
- edge_conf['instance_name'])
+ public_ip_address = AzureMeta.get_instance_public_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name'])
+ private_ip_address = AzureMeta.get_private_ip_address(edge_conf['resource_group_name'],
+ edge_conf['instance_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(edge_conf['instance_name']))
@@ -72,7 +78,7 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
index 8c16d12..1b3fd15 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_status.py
@@ -23,14 +23,20 @@
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
+import traceback
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/edge/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
@@ -44,13 +50,12 @@
logging.info('[COLLECT DATA]')
print('[COLLECTING DATA]')
params = '--resource_group_name {} --list_resources "{}"'.format(edge_conf['resource_group_name'],
- os.environ['edge_list_resources'])
+ os.environ['edge_list_resources'])
try:
local("~/scripts/{}.py {}".format('common_collect_data', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to collect necessary information.", str(err))
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to collect necessary information.", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
index 1bb319b..dfc4cba 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_stop.py
@@ -20,10 +20,13 @@
# under the License.
#
# ******************************************************************************
-
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
import sys
+import json
if __name__ == "__main__":
@@ -35,21 +38,22 @@
filename=local_log_filepath)
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
edge_conf = dict()
edge_conf['service_base_name'] = os.environ['conf_service_base_name']
edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
- edge_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+ edge_conf['project_name'] = os.environ['project_name']
+ edge_conf['endpoint_name'] = os.environ['endpoint_name']
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
logging.info('[STOP EDGE]')
print('[STOP EDGE]')
try:
- AzureActions().stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
+ AzureActions.stop_instance(edge_conf['resource_group_name'], edge_conf['instance_name'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to stop edge.", str(err))
+ dlab.fab.append_result("Failed to stop edge.", str(err))
sys.exit(1)
try:
@@ -58,7 +62,7 @@
"Action": "Stop edge server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
index d785f23..a61c75d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/edge_terminate.py
@@ -22,128 +22,137 @@
# ******************************************************************************
import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
print("Terminating EDGE, notebook and dataengine virtual machines")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
try:
if project_tag == vm.tags["project_tag"]:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate instance", str(err))
sys.exit(1)
print("Removing network interfaces")
try:
- for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+ for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
try:
if project_tag == network_interface.tags["project_tag"]:
- AzureActions().delete_network_if(resource_group_name, network_interface.name)
+ AzureActions.delete_network_if(resource_group_name, network_interface.name)
print("Network interface {} has been removed".format(network_interface.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove network interfaces", str(err))
sys.exit(1)
print("Removing static public IPs")
try:
- for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+ for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
try:
if project_tag in static_public_ip.tags["project_tag"]:
- AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+ AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
print("Static public IP {} has been removed".format(static_public_ip.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove static IPs", str(err))
sys.exit(1)
print("Removing disks")
try:
- for disk in AzureMeta().list_disks(resource_group_name):
+ for disk in AzureMeta.list_disks(resource_group_name):
try:
if project_tag in disk.tags["project_tag"]:
- AzureActions().remove_disk(resource_group_name, disk.name)
+ AzureActions.remove_disk(resource_group_name, disk.name)
print("Disk {} has been removed".format(disk.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove disks", str(err))
sys.exit(1)
print("Removing storage account")
try:
- for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+ for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
try:
if project_tag == storage_account.tags["project_tag"]:
- AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+ AzureActions.remove_storage_account(resource_group_name, storage_account.name)
print("Storage account {} has been terminated".format(storage_account.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove storage accounts", str(err))
sys.exit(1)
print("Deleting Data Lake Store directory")
try:
- for datalake in AzureMeta().list_datalakes(resource_group_name):
+ for datalake in AzureMeta.list_datalakes(resource_group_name):
try:
if service_base_name == datalake.tags["SBN"]:
- AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+ AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove Data Lake", str(err))
sys.exit(1)
print("Removing security groups")
try:
- for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+ for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
try:
if project_tag == sg.tags["project_tag"]:
- AzureActions().remove_security_group(resource_group_name, sg.name)
+ AzureActions.remove_security_group(resource_group_name, sg.name)
print("Security group {} has been terminated".format(sg.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove security groups", str(err))
sys.exit(1)
print("Removing private subnet")
try:
- AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+ AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
print("Private subnet {} has been terminated".format(subnet_name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove subnet", str(err))
sys.exit(1)
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/edge/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
edge_conf = dict()
edge_conf['service_base_name'] = os.environ['conf_service_base_name']
edge_conf['resource_group_name'] = os.environ['azure_resource_group_name']
- edge_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- edge_conf['project_name'] = os.environ['project_name'].replace('_', '-')
- edge_conf['project_tag'] = os.environ['project_name'].replace('_', '-')
- edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + "-" + edge_conf['project_name'] + '-subnet'
+ edge_conf['user_name'] = os.environ['edge_user_name']
+ edge_conf['project_name'] = os.environ['project_name']
+ edge_conf['project_tag'] = edge_conf['project_name']
+ edge_conf['endpoint_name'] = os.environ['endpoint_name']
+ edge_conf['private_subnet_name'] = "{}-{}-{}-subnet".format(edge_conf['service_base_name'],
+ edge_conf['project_name'], edge_conf['endpoint_name'])
edge_conf['vpc_name'] = os.environ['azure_vpc_name']
@@ -153,10 +162,11 @@
try:
terminate_edge_node(edge_conf['resource_group_name'], edge_conf['service_base_name'],
edge_conf['project_tag'], edge_conf['private_subnet_name'], edge_conf['vpc_name'])
- except Exception as err:
+ except:
traceback.print_exc()
- append_result("Failed to terminate edge.", str(err))
- except:
+ raise Exception
+ except Exception as err:
+ dlab.fab.append_result("Failed to terminate edge.", str(err))
sys.exit(1)
try:
@@ -166,6 +176,6 @@
"Action": "Terminate edge node"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
index 8d9ac96..49f9872 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyter_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -38,53 +40,57 @@
level=logging.DEBUG,
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['user_keyname'] = os.environ['project_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
- notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
- "SBN": notebook_config['service_base_name'],
- "User": notebook_config['user_name'],
- "project_tag": notebook_config['project_tag'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "Exploratory": notebook_config['exploratory_name'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+ "SBN": notebook_config['service_base_name'],
+ "User": notebook_config['user_name'],
+ "project_tag": notebook_config['project_tag'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "Exploratory": notebook_config['exploratory_name'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
else:
notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
- "SBN": notebook_config['service_base_name'],
- "User": notebook_config['user_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "Exploratory": notebook_config['exploratory_name'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+ "SBN": notebook_config['service_base_name'],
+ "User": notebook_config['user_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "Exploratory": notebook_config['exploratory_name'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -93,45 +99,45 @@
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate variables dictionary.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -139,9 +145,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab-user'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -158,9 +163,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -176,9 +180,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -189,7 +192,7 @@
"--region {2} --spark_version {3} " \
"--hadoop_version {4} --os_user {5} " \
"--scala_version {6} --r_mirror {7} " \
- "--ip_adress {8} --exploratory_name {9} --edge_ip {10}".\
+ "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
format(instance_hostname, keyfile_name,
os.environ['azure_region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
@@ -197,15 +200,14 @@
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
- os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyter.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure jupyter.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -218,12 +220,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -235,12 +236,11 @@
# local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -256,44 +256,45 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'],
+ notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
- notebook_config['instance_name'],
- os.environ['azure_region'],
- notebook_config['expected_image_name'],
- json.dumps(notebook_config['image_tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+ notebook_config['instance_name'],
+ os.environ['azure_region'],
+ notebook_config['expected_image_name'],
+ json.dumps(notebook_config['image_tags']))
print("Image was successfully created.")
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- notebook_config['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image from notebook.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -318,18 +319,17 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -368,6 +368,6 @@
]}
result.write(json.dumps(res))
except Exception as err:
- append_result("Failed to generate output information", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
index eaf8b75..4c44bbd 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/jupyterlab_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
+import traceback
if __name__ == "__main__":
@@ -38,54 +40,57 @@
level=logging.DEBUG,
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['user_keyname'] = os.environ['project_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'],
- os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
- notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
- notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
- "SBN": notebook_config['service_base_name'],
- "User": notebook_config['user_name'],
- "project_tag": notebook_config['project_tag'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "Exploratory": notebook_config['exploratory_name'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+ "SBN": notebook_config['service_base_name'],
+ "User": notebook_config['user_name'],
+ "project_tag": notebook_config['project_tag'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "Exploratory": notebook_config['exploratory_name'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
else:
notebook_config['expected_image_name'] = '{0}-{1}-{2}-notebook-image'.format(
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
- "SBN": notebook_config['service_base_name'],
- "User": notebook_config['user_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "Exploratory": notebook_config['exploratory_name'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
+ "SBN": notebook_config['service_base_name'],
+ "User": notebook_config['user_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "Exploratory": notebook_config['exploratory_name'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
- notebook_config['security_group_name'] = '{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'])
+ notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -94,26 +99,27 @@
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
@@ -122,9 +128,8 @@
initial_user = 'ec2-user'
sudo_group = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate variables dictionary.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -140,9 +145,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab-user'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -159,9 +163,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -177,9 +180,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -190,7 +192,7 @@
"--region {2} --spark_version {3} " \
"--hadoop_version {4} --os_user {5} " \
"--scala_version {6} --r_mirror {7} " \
- "--ip_adress {8} --exploratory_name {9} --edge_ip {10}".\
+ "--ip_address {8} --exploratory_name {9} --edge_ip {10}".\
format(instance_hostname, keyfile_name,
os.environ['azure_region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
@@ -198,15 +200,13 @@
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_jupyterlab_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
- os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyter.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -219,12 +219,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -236,23 +235,22 @@
# local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
-
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
notebook_config['instance_name'],
os.environ['azure_region'],
notebook_config['expected_image_name'],
@@ -261,23 +259,23 @@
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- notebook_config['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image from notebook.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image from notebook.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -302,12 +300,11 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -325,8 +322,7 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy for docker.", str(err))
+ dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
@@ -346,15 +342,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start Jupyter container.", str(err))
+ dlab.fab.append_result("Failed to start Jupyter container.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# generating output information
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
@@ -393,6 +388,6 @@
]}
result.write(json.dumps(res))
except Exception as err:
- append_result("Failed to generate output information", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
index f5c98ab..e2d481d 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_prepare.py
@@ -22,16 +22,22 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os, re
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import re
import traceback
from Crypto.PublicKey import RSA
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+from fabric.api import *
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/project/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
@@ -39,12 +45,15 @@
try:
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
project_conf = dict()
- project_conf['service_base_name'] = os.environ['conf_service_base_name']
- project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ project_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
+ project_conf['project_name'] = (os.environ['project_name'])
+ project_conf['project_tag'] = project_conf['project_name']
+ project_conf['endpoint_name'] = (os.environ['endpoint_name'])
+ project_conf['endpoint_tag'] = project_conf['endpoint_name']
project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
project_conf['azure_ad_user_name'] = os.environ['azure_iam_user']
@@ -52,11 +61,15 @@
project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
project_conf['vpc_name'] = os.environ['azure_vpc_name']
project_conf['subnet_name'] = os.environ['azure_subnet_name']
- project_conf['private_subnet_name'] = project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-subnet'
+ project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
if os.environ['conf_network_type'] == 'private':
project_conf['static_public_ip_name'] = 'None'
else:
- project_conf['static_public_ip_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-edge-ip'
+ project_conf['static_public_ip_name'] = '{}-{}-{}-edge-static-ip'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
project_conf['region'] = os.environ['azure_region']
project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
@@ -65,90 +78,99 @@
project_conf['project_name'],
project_conf['endpoint_tag'])
project_conf['network_interface_name'] = '{0}-nif'.format(project_conf['instance_name'])
- project_conf['primary_disk_name'] = project_conf['instance_name'] + '-disk0'
+ project_conf['primary_disk_name'] = project_conf['instance_name'] + '-volume-0'
project_conf['edge_security_group_name'] = project_conf['instance_name'] + '-sg'
- project_conf['notebook_security_group_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + "-" + os.environ['endpoint_name']\
- + '-nb-sg'
- project_conf['master_security_group_name'] = project_conf['service_base_name'] + '-' \
- + project_conf['project_name'] + '-dataengine-master-sg'
- project_conf['slave_security_group_name'] = project_conf['service_base_name'] + '-' \
- + project_conf['project_name'] + '-dataengine-slave-sg'
- project_conf['edge_storage_account_name'] = '{0}-{1}-{2}-storage'.format(project_conf['service_base_name'],
+ project_conf['notebook_security_group_name'] = '{}-{}-{}-nb-sg'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['edge_storage_account_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
project_conf['project_name'],
- project_conf['endpoint_name'])
- project_conf['edge_container_name'] = (project_conf['service_base_name'] + '-' + project_conf['project_name'] + '-' + project_conf['endpoint_name'] +
- '-container').lower()
- project_conf['datalake_store_name'] = project_conf['service_base_name'] + '-ssn-datalake'
- project_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(project_conf['service_base_name'],
- project_conf['project_name'])
+ project_conf['endpoint_name'])).lower()
+ project_conf['edge_container_name'] = ('{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])).lower()
+ project_conf['datalake_store_name'] = '{}-ssn-datalake'.format(project_conf['service_base_name'])
+ project_conf['datalake_user_directory_name'] = '{0}-{1}-{2}-folder'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
ssh_key_path = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
key = RSA.importKey(open(ssh_key_path, 'rb').read())
project_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
project_conf['instance_storage_account_type'] = 'Premium_LRS'
project_conf['image_name'] = os.environ['azure_{}_image_name'.format(os.environ['conf_os_family'])]
project_conf['instance_tags'] = {"Name": project_conf['instance_name'],
- "SBN": project_conf['service_base_name'],
- "project_tag": project_conf['project_tag'],
- "endpoint_tag": project_conf['endpoint_tag'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ "SBN": project_conf['service_base_name'],
+ "project_tag": project_conf['project_tag'],
+ "endpoint_tag": project_conf['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
project_conf['storage_account_tags'] = {"Name": project_conf['edge_storage_account_name'],
"SBN": project_conf['service_base_name'],
"project_tag": project_conf['project_tag'],
"endpoint_tag": project_conf['endpoint_tag'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
+ os.environ['conf_billing_tag_key']:
+ os.environ['conf_billing_tag_value'],
project_conf['tag_name']: project_conf['edge_storage_account_name']}
project_conf['primary_disk_size'] = '32'
- project_conf['shared_storage_account_name'] = '{0}-{1}-shared-storage'.format(project_conf['service_base_name'],
- project_conf['endpoint_name'])
- project_conf['shared_container_name'] = '{}-{}-shared-container'.format(project_conf['service_base_name'], project_conf['endpoint_name']).lower()
+ project_conf['shared_storage_account_name'] = ('{0}-{1}-shared-bucket'.format(
+ project_conf['service_base_name'], project_conf['endpoint_name'])).lower()
+ project_conf['shared_container_name'] = ('{}-{}-shared-bucket'.format(project_conf['service_base_name'],
+ project_conf['endpoint_name'])).lower()
project_conf['shared_storage_account_tags'] = {"Name": project_conf['shared_storage_account_name'],
- "SBN": project_conf['service_base_name'],
- os.environ['conf_billing_tag_key']: os.environ[
- 'conf_billing_tag_value'], "endpoint_tag": project_conf['endpoint_tag'],
- project_conf['tag_name']: project_conf['shared_storage_account_name']}
+ "SBN": project_conf['service_base_name'],
+ os.environ['conf_billing_tag_key']: os.environ[
+ 'conf_billing_tag_value'], "endpoint_tag":
+ project_conf['endpoint_tag'],
+ project_conf['tag_name']:
+ project_conf['shared_storage_account_name']}
# FUSE in case of absence of user's key
try:
project_conf['user_key'] = os.environ['key']
try:
local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
- os.environ['project_name']))
+ project_conf['project_name']))
except:
print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
except KeyError:
print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
sys.exit(1)
- print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+ print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(
+ project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
logging.info(json.dumps(project_conf))
except Exception as err:
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary.", str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATE SUBNET]')
print('[CREATE SUBNET]')
params = "--resource_group_name {} --vpc_name {} --region {} --vpc_cidr {} --subnet_name {} --prefix {}".\
- format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'], project_conf['vpc_cidr'],
- project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
+ format(project_conf['resource_group_name'], project_conf['vpc_name'], project_conf['region'],
+ project_conf['vpc_cidr'], project_conf['private_subnet_name'], project_conf['private_subnet_prefix'])
try:
local("~/scripts/{}.py {}".format('common_create_subnet', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
except:
print("Subnet hasn't been created.")
- append_result("Failed to create subnet.", str(err))
+ dlab.fab.append_result("Failed to create subnet.", str(err))
sys.exit(1)
- project_conf['private_subnet_cidr'] = AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name']).address_prefix
+ project_conf['private_subnet_cidr'] = AzureMeta.get_subnet(project_conf['resource_group_name'],
+ project_conf['vpc_name'],
+ project_conf['private_subnet_name']).address_prefix
print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
try:
@@ -421,20 +443,20 @@
}
]
params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
- format(project_conf['resource_group_name'], project_conf['edge_security_group_name'], project_conf['region'],
- json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
+ format(project_conf['resource_group_name'], project_conf['edge_security_group_name'],
+ project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(edge_list_rules))
try:
local("~/scripts/{}.py {}".format('common_create_security_group', params))
except Exception as err:
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
try:
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
except:
print("Edge Security group hasn't been created.")
traceback.print_exc()
- append_result("Failed creating security group for edge node.", str(err))
+ dlab.fab.append_result("Failed creating security group for edge node.", str(err))
raise Exception
except:
sys.exit(1)
@@ -459,7 +481,8 @@
"protocol": "*",
"source_port_range": "*",
"destination_port_range": "*",
- "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+ project_conf['vpc_name'],
project_conf['subnet_name']).address_prefix,
"destination_address_prefix": "*",
"access": "Allow",
@@ -494,8 +517,9 @@
"source_port_range": "*",
"destination_port_range": "*",
"source_address_prefix": "*",
- "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['subnet_name']).address_prefix,
+ "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+ project_conf['vpc_name'],
+ project_conf['subnet_name']).address_prefix,
"access": "Allow",
"priority": 110,
"direction": "Outbound"
@@ -524,21 +548,22 @@
}
]
params = "--resource_group_name {} --security_group_name {} --region {} --tags '{}' --list_rules '{}'". \
- format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'], project_conf['region'],
- json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
+ format(project_conf['resource_group_name'], project_conf['notebook_security_group_name'],
+ project_conf['region'], json.dumps(project_conf['instance_tags']), json.dumps(notebook_list_rules))
try:
local("~/scripts/{}.py {}".format('common_create_security_group', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
+ dlab.fab.append_result("Failed creating security group for private subnet.", str(err))
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
try:
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
except:
print("Notebook Security group hasn't been created.")
sys.exit(1)
@@ -563,9 +588,9 @@
"protocol": "*",
"source_port_range": "*",
"destination_port_range": "*",
- "source_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
- project_conf['vpc_name'],
- project_conf['subnet_name']).address_prefix,
+ "source_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+ project_conf['vpc_name'],
+ project_conf['subnet_name']).address_prefix,
"destination_address_prefix": "*",
"access": "Allow",
"priority": 110,
@@ -599,9 +624,9 @@
"source_port_range": "*",
"destination_port_range": "*",
"source_address_prefix": "*",
- "destination_address_prefix": AzureMeta().get_subnet(project_conf['resource_group_name'],
- project_conf['vpc_name'],
- project_conf['subnet_name']).address_prefix,
+ "destination_address_prefix": AzureMeta.get_subnet(project_conf['resource_group_name'],
+ project_conf['vpc_name'],
+ project_conf['subnet_name']).address_prefix,
"access": "Allow",
"priority": 110,
"direction": "Outbound"
@@ -638,18 +663,18 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['notebook_security_group_name'])
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
try:
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
except:
print("Master Security group hasn't been created.")
- append_result("Failed to create Security groups. Exception:" + str(err))
+ dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
sys.exit(1)
logging.info('[CREATING SECURITY GROUPS FOR SLAVE NODES]')
@@ -664,20 +689,20 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
try:
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['slave_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['slave_security_group_name'])
except:
print("Slave Security group hasn't been created.")
- append_result("Failed to create Security groups. Exception:" + str(err))
+ dlab.fab.append_result("Failed to create Security groups. Exception:" + str(err))
sys.exit(1)
try:
@@ -688,21 +713,20 @@
project_conf['resource_group_name'], project_conf['region'])
local("~/scripts/{}.py {}".format('common_create_storage_account', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create storage account.", str(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+ dlab.fab.append_result("Failed to create storage account.", str(err))
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['slave_security_group_name'])
+ for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
sys.exit(1)
try:
@@ -718,67 +742,71 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create storage account.", str(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+ dlab.fab.append_result("Failed to create storage account.", str(err))
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['slave_security_group_name'])
+ for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
sys.exit(1)
if os.environ['azure_datalake_enable'] == 'true':
try:
logging.info('[CREATE DATA LAKE STORE DIRECTORY]')
print('[CREATE DATA LAKE STORE DIRECTORY]')
- params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} --service_base_name {}". \
- format(project_conf['resource_group_name'], project_conf['datalake_store_name'],
- project_conf['datalake_user_directory_name'], project_conf['azure_ad_user_name'],
- project_conf['service_base_name'])
+ params = "--resource_group_name {} --datalake_name {} --directory_name {} --ad_user {} " \
+ "--service_base_name {}".format(project_conf['resource_group_name'],
+ project_conf['datalake_store_name'],
+ project_conf['datalake_user_directory_name'],
+ project_conf['azure_ad_user_name'],
+ project_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_datalake_directory', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create Data Lake Store directory.", str(err))
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+ dlab.fab.append_result("Failed to create Data Lake Store directory.", str(err))
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['slave_security_group_name'])
+ for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
try:
- for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+ for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
if project_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
- except Exception as err:
- print('Error: {0}'.format(err))
+ AzureActions.remove_datalake_directory(datalake.name,
+ project_conf['datalake_user_directory_name'])
+ except:
print("Data Lake Store directory hasn't been created.")
sys.exit(1)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ project_conf['initial_user'] = 'ubuntu'
+ project_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ project_conf['initial_user'] = 'ec2-user'
+ project_conf['sudo_group'] = 'wheel'
try:
logging.info('[CREATE EDGE INSTANCE]')
@@ -788,10 +816,12 @@
--dlab_ssh_user_name {} --public_ip_name {} --public_key '''{}''' --primary_disk_size {} \
--instance_type {} --project_name {} --instance_storage_account_type {} --image_name {} --tags '{}'".\
format(project_conf['instance_name'], os.environ['azure_edge_instance_size'], project_conf['region'],
- project_conf['vpc_name'], project_conf['network_interface_name'], project_conf['edge_security_group_name'],
- project_conf['subnet_name'], project_conf['service_base_name'], project_conf['resource_group_name'],
- initial_user, project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
- project_conf['primary_disk_size'], 'edge', project_conf['project_name'], project_conf['instance_storage_account_type'],
+ project_conf['vpc_name'], project_conf['network_interface_name'],
+ project_conf['edge_security_group_name'], project_conf['subnet_name'],
+ project_conf['service_base_name'], project_conf['resource_group_name'],
+ project_conf['initial_user'], project_conf['static_public_ip_name'], project_conf['public_ssh_key'],
+ project_conf['primary_disk_size'], 'edge', project_conf['project_name'],
+ project_conf['instance_storage_account_type'],
project_conf['image_name'], json.dumps(project_conf['instance_tags']))
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
@@ -799,27 +829,29 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- AzureActions().remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
+ AzureActions.remove_instance(project_conf['resource_group_name'], project_conf['instance_name'])
except:
print("The instance hasn't been created.")
- AzureActions().remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
- project_conf['private_subnet_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['edge_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'], project_conf['notebook_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['master_security_group_name'])
- AzureActions().remove_security_group(project_conf['resource_group_name'],
- project_conf['slave_security_group_name'])
- for storage_account in AzureMeta().list_storage_accounts(project_conf['resource_group_name']):
+ AzureActions.remove_subnet(project_conf['resource_group_name'], project_conf['vpc_name'],
+ project_conf['private_subnet_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['edge_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['notebook_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['master_security_group_name'])
+ AzureActions.remove_security_group(project_conf['resource_group_name'],
+ project_conf['slave_security_group_name'])
+ for storage_account in AzureMeta.list_storage_accounts(project_conf['resource_group_name']):
if project_conf['edge_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
if project_conf['shared_storage_account_name'] == storage_account.tags["Name"]:
- AzureActions().remove_storage_account(project_conf['resource_group_name'], storage_account.name)
+ AzureActions.remove_storage_account(project_conf['resource_group_name'], storage_account.name)
if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(project_conf['resource_group_name']):
+ for datalake in AzureMeta.list_datalakes(project_conf['resource_group_name']):
if project_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().remove_datalake_directory(datalake.name, project_conf['datalake_user_directory_name'])
- append_result("Failed to create instance. Exception:" + str(err))
+ AzureActions.remove_datalake_directory(datalake.name,
+ project_conf['datalake_user_directory_name'])
+ dlab.fab.append_result("Failed to create instance. Exception:" + str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
index 202a68b..765959f 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/project_terminate.py
@@ -22,128 +22,149 @@
# ******************************************************************************
import json
-import sys, time, os
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import logging
+import sys
+import time
+import os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import requests
+import traceback
def terminate_edge_node(resource_group_name, service_base_name, project_tag, subnet_name, vpc_name):
print("Terminating EDGE, notebook and dataengine virtual machines")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
try:
if project_tag == vm.tags["project_tag"]:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate edge instance.", str(err))
sys.exit(1)
print("Removing network interfaces")
try:
- for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+ for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
try:
if project_tag == network_interface.tags["project_name"]:
- AzureActions().delete_network_if(resource_group_name, network_interface.name)
+ AzureActions.delete_network_if(resource_group_name, network_interface.name)
print("Network interface {} has been removed".format(network_interface.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove network interfaces.", str(err))
sys.exit(1)
print("Removing static public IPs")
try:
- for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+ for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
try:
if project_tag in static_public_ip.tags["project_tag"]:
- AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+ AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
print("Static public IP {} has been removed".format(static_public_ip.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove static IP addresses.", str(err))
sys.exit(1)
print("Removing disks")
try:
- for disk in AzureMeta().list_disks(resource_group_name):
+ for disk in AzureMeta.list_disks(resource_group_name):
try:
if project_tag in disk.tags["project_tag"]:
- AzureActions().remove_disk(resource_group_name, disk.name)
+ AzureActions.remove_disk(resource_group_name, disk.name)
print("Disk {} has been removed".format(disk.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove volumes.", str(err))
sys.exit(1)
print("Removing storage account")
try:
- for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+ for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
try:
if project_tag == storage_account.tags["project_tag"]:
- AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+ AzureActions.remove_storage_account(resource_group_name, storage_account.name)
print("Storage account {} has been terminated".format(storage_account.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove storage accounts.", str(err))
sys.exit(1)
print("Deleting Data Lake Store directory")
try:
- for datalake in AzureMeta().list_datalakes(resource_group_name):
+ for datalake in AzureMeta.list_datalakes(resource_group_name):
try:
if service_base_name == datalake.tags["SBN"]:
- AzureActions().remove_datalake_directory(datalake.name, project_tag + '-folder')
+ AzureActions.remove_datalake_directory(datalake.name, project_tag + '-folder')
print("Data Lake Store directory {} has been deleted".format(project_tag + '-folder'))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove Data Lake.", str(err))
+ sys.exit(1)
+
+ print("Removing project specific images")
+ try:
+ for image in AzureMeta.list_images():
+ if service_base_name == image.tags["SBN"] and 'project_tag' in image.tags \
+ and project_tag == image.tags["project_tag"]:
+ AzureActions.remove_image(resource_group_name, image.name)
+ print("Image {} has been removed".format(image.name))
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove images", str(err))
sys.exit(1)
print("Removing security groups")
try:
- for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+ for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
try:
if project_tag == sg.tags["project_tag"]:
- AzureActions().remove_security_group(resource_group_name, sg.name)
+ AzureActions.remove_security_group(resource_group_name, sg.name)
print("Security group {} has been terminated".format(sg.name))
except:
pass
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove security groups.", str(err))
sys.exit(1)
print("Removing private subnet")
try:
- AzureActions().remove_subnet(resource_group_name, vpc_name, subnet_name)
+ AzureActions.remove_subnet(resource_group_name, vpc_name, subnet_name)
print("Private subnet {} has been terminated".format(subnet_name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove subnets.", str(err))
sys.exit(1)
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/edge/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
print('Generating infrastructure names and tags')
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
project_conf = dict()
project_conf['service_base_name'] = os.environ['conf_service_base_name']
project_conf['resource_group_name'] = os.environ['azure_resource_group_name']
- project_conf['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- project_conf['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- project_conf['private_subnet_name'] = project_conf['service_base_name'] + "-" + project_conf['project_name'] + '-subnet'
+ project_conf['project_name'] = os.environ['project_name']
+ project_conf['project_tag'] = project_conf['project_name']
+ project_conf['endpoint_name'] = os.environ['endpoint_name']
+ project_conf['private_subnet_name'] = '{}-{}-{}-subnet'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
project_conf['vpc_name'] = os.environ['azure_vpc_name']
@@ -152,18 +173,22 @@
print('[TERMINATE EDGE]')
try:
terminate_edge_node(project_conf['resource_group_name'], project_conf['service_base_name'],
- project_conf['project_tag'], project_conf['private_subnet_name'], project_conf['vpc_name'])
+ project_conf['project_tag'], project_conf['private_subnet_name'],
+ project_conf['vpc_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate edge.", str(err))
+ dlab.fab.append_result("Failed to terminate edge.", str(err))
+ raise Exception
except:
sys.exit(1)
try:
print('[KEYCLOAK PROJECT CLIENT DELETE]')
logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
- keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
- keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+ keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+ os.environ['keycloak_auth_server_url'])
+ keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+ os.environ['keycloak_realm_name'])
keycloak_auth_data = {
"username": os.environ['keycloak_user'],
@@ -173,7 +198,8 @@
}
client_params = {
- "clientId": project_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'],
+ "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
}
keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -201,6 +227,6 @@
"Action": "Terminate edge node"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
index a9d2e50..8487238 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/rstudio_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -39,33 +41,36 @@
level=logging.DEBUG,
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
+ notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
notebook_config['project_name'],
+ notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"project_tag": notebook_config['project_tag'],
@@ -77,7 +82,7 @@
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"endpoint_tag": notebook_config['endpoint_tag'],
@@ -85,7 +90,8 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -94,46 +100,45 @@
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['rstudio_pass'] = id_generator()
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
-
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['rstudio_pass'] = dlab.fab.id_generator()
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate variables dictionary.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +146,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -160,9 +164,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -178,9 +181,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring R_STUDIO and all dependencies
@@ -190,7 +192,7 @@
params = "--hostname {0} --keyfile {1} " \
"--region {2} --rstudio_pass {3} " \
"--rstudio_version {4} --os_user {5} " \
- "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9} " \
+ "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9} " \
.format(instance_hostname, keyfile_name,
os.environ['azure_region'], notebook_config['rstudio_pass'],
os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -198,15 +200,14 @@
notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_rstudio_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure rstudio.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure rstudio.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -222,9 +223,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -235,12 +235,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -256,49 +255,45 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'],
+ notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
- notebook_config['instance_name'],
- os.environ['azure_region'],
- notebook_config['expected_image_name'],
- json.dumps(notebook_config['image_tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+ notebook_config['instance_name'],
+ os.environ['azure_region'],
+ notebook_config['expected_image_name'],
+ json.dumps(notebook_config['image_tags']))
print("Image was successfully created.")
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- notebook_config['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
-
- params = "--hostname {} --keyfile {} --os_user {} --rstudio_pass {}" \
- .format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
- notebook_config['rstudio_pass'])
- local("~/scripts/{}.py {}".format('rstudio_change_pass', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -323,18 +318,17 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
# generating output information
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
rstudio_ip_url = "http://" + ip_address + ":8787/"
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -377,7 +371,6 @@
"exploratory_pass": notebook_config['rstudio_pass']}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate output information", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
index 4f35a62..856cce4 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_configure.py
@@ -21,12 +21,15 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
from fabric.api import *
-from dlab.ssn_lib import *
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
import traceback
if __name__ == "__main__":
@@ -36,49 +39,71 @@
level=logging.DEBUG,
filename=local_log_filepath)
+ def clear_resources():
+ AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+ for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+ if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+ AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+ if 'azure_security_group_name' not in os.environ:
+ AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+ if 'azure_subnet_name' not in os.environ:
+ AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+ ssn_conf['subnet_name'])
+ if 'azure_vpc_name' not in os.environ:
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
+
try:
- instance = 'ssn'
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
+ ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
logging.info('[DERIVING NAMES]')
print('[DERIVING NAMES]')
- billing_enabled = True
-
- ssn_conf = dict()
- # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
- ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+ ssn_conf['billing_enabled'] = True
+        # We need to cut service_base_name to 20 symbols due to the Azure Name length limitation
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
# Check azure predefined resources
- ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
+ ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+ '{}-resource-group'.format(ssn_conf['service_base_name']))
ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
- ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
- ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+ ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+ ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(
+ ssn_conf['service_base_name']))
# Default variables
ssn_conf['region'] = os.environ['azure_region']
- ssn_conf['ssn_container_name'] = '{}-ssn-container'.format(ssn_conf['service_base_name']).lower()
ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
ssn_conf['datalake_store_name'] = '{}-ssn-datalake'.format(ssn_conf['service_base_name'])
ssn_conf['datalake_shared_directory_name'] = '{}-shared-folder'.format(ssn_conf['service_base_name'])
ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
- ssn_conf['ssh_key_path'] = os.environ['conf_key_dir'] + os.environ['conf_key_name'] + '.pem'
+ ssn_conf['ssh_key_path'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'],
+ ssn_conf['region'])
if os.environ['conf_network_type'] == 'private':
- ssn_conf['instnace_ip'] = AzureMeta().get_private_ip_address(ssn_conf['resource_group_name'],
- ssn_conf['instance_name'])
+ ssn_conf['instnace_ip'] = AzureMeta.get_private_ip_address(ssn_conf['resource_group_name'],
+ ssn_conf['instance_name'])
+ ssn_conf['instance_host'] = ssn_conf['instnace_ip']
else:
- ssn_conf['instnace_ip'] = AzureMeta().get_instance_public_ip_address(ssn_conf['resource_group_name'],
- ssn_conf['instance_name'])
- ssn_conf['instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(ssn_conf['instance_name'], ssn_conf['region'])
+ ssn_conf['instnace_ip'] = AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+ ssn_conf['instance_name'])
+ ssn_conf['instance_host'] = ssn_conf['instance_dns_name']
if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} --san {1} '.format(AzureMeta().get_private_ip_address(
- ssn_conf['resource_group_name'], ssn_conf['instance_name']), ssn_conf['instance_dns_name'])
+ ssn_conf['step_cert_sans'] = ' --san {0} '.format(AzureMeta.get_private_ip_address(
+ ssn_conf['resource_group_name'], ssn_conf['instance_name']))
if os.environ['conf_network_type'] == 'public':
- step_cert_sans += ' --san {0}'.format(
- AzureMeta().get_instance_public_ip_address(ssn_conf['resource_group_name'],
- ssn_conf['instance_name']))
+ ssn_conf['step_cert_sans'] += ' --san {0} --san {1} '.format(
+ AzureMeta.get_instance_public_ip_address(ssn_conf['resource_group_name'],
+ ssn_conf['instance_name']),
+ ssn_conf['instance_dns_name'])
else:
- step_cert_sans = ''
+ ssn_conf['step_cert_sans'] = ''
try:
if os.environ['azure_offer_number'] == '':
@@ -90,62 +115,49 @@
if os.environ['azure_region_info'] == '':
raise KeyError
except KeyError:
- billing_enabled = False
- if not billing_enabled:
+ ssn_conf['billing_enabled'] = False
+ if not ssn_conf['billing_enabled']:
os.environ['azure_offer_number'] = 'None'
os.environ['azure_currency'] = 'None'
os.environ['azure_locale'] = 'None'
os.environ['azure_region_info'] = 'None'
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ ssn_conf['initial_user'] = 'ubuntu'
+ ssn_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ ssn_conf['initial_user'] = 'ec2-user'
+ ssn_conf['sudo_group'] = 'wheel'
except Exception as err:
- print("Failed to generate variables dictionary." + str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
sys.exit(1)
- def clear_resources():
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
- if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
- if 'azure_subnet_name' not in os.environ:
- AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
- ssn_conf['subnet_name'])
- if 'azure_security_group_name' not in os.environ:
- AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
- for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
- if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
- AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
-
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+ (ssn_conf['instance_host'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'], ssn_conf['dlab_ssh_user'],
+ ssn_conf['sudo_group'])
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except Exception as err:
- #print('Error: {0}'.format(err))
traceback.print_exc()
clear_resources()
- append_result("Failed creating ssh user 'dlab-user'.", str(err))
+ dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
print('[INSTALLING PREREQUISITES TO SSN INSTANCE]')
- params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml pycrypto azure==2.0.0' \
- --user {} --region {}".format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'],
- ssn_conf['dlab_ssh_user'], ssn_conf['region'])
+ params = "--hostname {} --keyfile {} --pip_packages 'backoff argparse fabric==1.14.0 pymongo pyyaml " \
+ "pycrypto azure==2.0.0' --user {} --region {}".format(ssn_conf['instance_host'],
+ ssn_conf['ssh_key_path'],
+ ssn_conf['dlab_ssh_user'],
+ ssn_conf['region'])
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except Exception as err:
- #print('Error: {0}'.format(err))
traceback.print_exc()
clear_resources()
- append_result("Failed installing software: pip, packages.", str(err))
+ dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
sys.exit(1)
try:
@@ -157,15 +169,14 @@
"subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
"--tag_resource_id {} --step_cert_sans '{}'". \
- format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
+ format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
- step_cert_sans)
+ ssn_conf['step_cert_sans'])
local("~/scripts/{}.py {}".format('configure_ssn_node', params))
except Exception as err:
- #print('Error: {0}'.format(err))
traceback.print_exc()
clear_resources()
- append_result("Failed configuring ssn.", str(err))
+ dlab.fab.append_result("Failed configuring ssn.", str(err))
sys.exit(1)
try:
@@ -181,28 +192,28 @@
{"name": "tensor", "tag": "latest"},
{"name": "deeplearning", "tag": "latest"},
{"name": "dataengine", "tag": "latest"}]
- params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} --cloud_provider {} --region {}". \
- format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
- os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
- os.environ['conf_cloud_provider'], ssn_conf['region'])
+ params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+ "--cloud_provider {} --region {}".format(ssn_conf['instance_host'], ssn_conf['ssh_key_path'],
+ json.dumps(additional_config), os.environ['conf_os_family'],
+ ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
+ os.environ['conf_cloud_provider'], ssn_conf['region'])
local("~/scripts/{}.py {}".format('configure_docker', params))
except Exception as err:
- #print('Error: {0}'.format(err))
traceback.print_exc()
clear_resources()
- append_result("Unable to configure docker.", str(err))
+ dlab.fab.append_result("Unable to configure docker.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE SSN INSTANCE UI]')
print('[CONFIGURE SSN INSTANCE UI]')
- azure_auth_path = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
- ldap_login = 'false'
+ ssn_conf['azure_auth_path'] = '/home/{}/keys/azure_auth.json'.format(ssn_conf['dlab_ssh_user'])
+ ssn_conf['ldap_login'] = 'false'
cloud_params = [
{
'key': 'KEYCLOAK_REDIRECT_URI',
- 'value': "https://{0}/".format(ssn_conf['instnace_ip'])
+ 'value': "https://{0}/".format(ssn_conf['instance_host'])
},
{
'key': 'KEYCLOAK_REALM_NAME',
@@ -313,10 +324,6 @@
'value': ''
},
{
- 'key': 'SHARED_IMAGE_ENABLED',
- 'value': os.environ['conf_shared_image_enabled']
- },
- {
'key': 'CONF_IMAGE_ENABLED',
'value': os.environ['conf_image_enabled']
},
@@ -391,11 +398,11 @@
'value': ''
})
if os.environ['azure_oauth2_enabled'] == 'false':
- ldap_login = 'true'
- tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
- subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
- datalake_application_id = os.environ['azure_application_id']
- datalake_store_name = None
+ ssn_conf['ldap_login'] = 'true'
+ ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+ ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+ ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+ ssn_conf['datalake_store_name'] = None
else:
cloud_params.append(
{
@@ -407,30 +414,30 @@
'key': 'AZURE_CLIENT_ID',
'value': os.environ['azure_application_id']
})
- tenant_id = json.dumps(AzureMeta().sp_creds['tenantId']).replace('"', '')
- subscription_id = json.dumps(AzureMeta().sp_creds['subscriptionId']).replace('"', '')
- datalake_application_id = os.environ['azure_application_id']
- for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+ ssn_conf['tenant_id'] = json.dumps(AzureMeta.sp_creds['tenantId']).replace('"', '')
+ ssn_conf['subscription_id'] = json.dumps(AzureMeta.sp_creds['subscriptionId']).replace('"', '')
+ ssn_conf['datalake_application_id'] = os.environ['azure_application_id']
+ for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
- datalake_store_name = datalake.name
- params = "--hostname {} --keyfile {} --dlab_path {} --os_user {} --os_family {} --request_id {} \
- --resource {} --service_base_name {} --cloud_provider {} --billing_enabled {} --authentication_file {} \
- --offer_number {} --currency {} --locale {} --region_info {} --ldap_login {} --tenant_id {} \
- --application_id {} --datalake_store_name {} --cloud_params '{}' --subscription_id {} \
- --validate_permission_scope {} --default_endpoint_name {}". \
- format(ssn_conf['instnace_ip'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'],
- ssn_conf['dlab_ssh_user'], os.environ['conf_os_family'], os.environ['request_id'],
- os.environ['conf_resource'], ssn_conf['service_base_name'], os.environ['conf_cloud_provider'],
- billing_enabled, azure_auth_path, os.environ['azure_offer_number'],
- os.environ['azure_currency'], os.environ['azure_locale'], os.environ['azure_region_info'],
- ldap_login, tenant_id, datalake_application_id, datalake_store_name, json.dumps(cloud_params),
- subscription_id, os.environ['azure_validate_permission_scope'], ssn_conf['default_endpoint_name'])
+ ssn_conf['datalake_store_name'] = datalake.name
+ params = "--hostname {} --keyfile {} --dlab_path {} --os_user {} --os_family {} --request_id {} " \
+ "--resource {} --service_base_name {} --cloud_provider {} --billing_enabled {} " \
+ "--authentication_file {} --offer_number {} --currency {} --locale {} --region_info {} " \
+ "--ldap_login {} --tenant_id {} --application_id {} --datalake_store_name {} --cloud_params '{}' " \
+ "--subscription_id {} --validate_permission_scope {} --default_endpoint_name {}".format(
+ ssn_conf['instance_host'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'],
+ ssn_conf['dlab_ssh_user'], os.environ['conf_os_family'], os.environ['request_id'],
+ os.environ['conf_resource'], ssn_conf['service_base_name'], os.environ['conf_cloud_provider'],
+ ssn_conf['billing_enabled'], ssn_conf['azure_auth_path'], os.environ['azure_offer_number'],
+ os.environ['azure_currency'], os.environ['azure_locale'], os.environ['azure_region_info'],
+ ssn_conf['ldap_login'], ssn_conf['tenant_id'], ssn_conf['datalake_application_id'],
+ ssn_conf['datalake_store_name'], json.dumps(cloud_params), ssn_conf['subscription_id'],
+ os.environ['azure_validate_permission_scope'], ssn_conf['default_endpoint_name'])
local("~/scripts/{}.py {}".format('configure_ui', params))
except Exception as err:
- #print('Error: {0}'.format(err))
traceback.print_exc()
clear_resources()
- append_result("Unable to configure UI.", str(err))
+ dlab.fab.append_result("Unable to configure UI.", str(err))
sys.exit(1)
try:
@@ -447,21 +454,22 @@
print("Key name: {}".format(os.environ['conf_key_name']))
print("VPC Name: {}".format(ssn_conf['vpc_name']))
print("Subnet Name: {}".format(ssn_conf['subnet_name']))
- print("Firewall Names: {}".format(ssn_conf['security_group_name']))
+ print("Security groups Names: {}".format(ssn_conf['security_group_name']))
print("SSN instance size: {}".format(os.environ['azure_ssn_instance_size']))
+ ssn_conf['datalake_store_full_name'] = 'None'
if os.environ['azure_datalake_enable'] == 'true':
- for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+ for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
- datalake_store_name = datalake.name
- print("DataLake store name: {}".format(datalake_store_name))
+ ssn_conf['datalake_store_full_name'] = datalake.name
+ print("DataLake store name: {}".format(ssn_conf['datalake_store_full_name']))
print("DataLake shared directory name: {}".format(ssn_conf['datalake_shared_directory_name']))
print("Region: {}".format(ssn_conf['region']))
- jenkins_url = "http://{}/jenkins".format(ssn_conf['instnace_ip'])
- jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instnace_ip'])
+ jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_host'])
+ jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_host'])
print("Jenkins URL: {}".format(jenkins_url))
print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
- print("DLab UI HTTP URL: http://{}".format(ssn_conf['instnace_ip']))
- print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instnace_ip']))
+ print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_host']))
+ print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_host']))
try:
with open('jenkins_creds.txt') as f:
@@ -474,7 +482,7 @@
if os.environ['azure_datalake_enable'] == 'false':
res = {"service_base_name": ssn_conf['service_base_name'],
"instance_name": ssn_conf['instance_name'],
- "instance_hostname": ssn_conf['instnace_ip'],
+ "instance_hostname": ssn_conf['instance_host'],
"master_keyname": os.environ['conf_key_name'],
"vpc_id": ssn_conf['vpc_name'],
"subnet_id": ssn_conf['subnet_name'],
@@ -485,13 +493,13 @@
else:
res = {"service_base_name": ssn_conf['service_base_name'],
"instance_name": ssn_conf['instance_name'],
- "instance_hostname": ssn_conf['instnace_ip'],
+ "instance_hostname": ssn_conf['instance_host'],
"master_keyname": os.environ['conf_key_name'],
"vpc_id": ssn_conf['vpc_name'],
"subnet_id": ssn_conf['subnet_name'],
"security_id": ssn_conf['security_group_name'],
"instance_shape": os.environ['azure_ssn_instance_size'],
- "datalake_name": datalake_store_name,
+ "datalake_name": ssn_conf['datalake_store_full_name'],
"datalake_shared_directory_name": ssn_conf['datalake_shared_directory_name'],
"region": ssn_conf['region'],
"action": "Create SSN instance"}
@@ -501,5 +509,6 @@
params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], ssn_conf['instnace_ip'])
local("~/scripts/{}.py {}".format('upload_response_file', params))
- except:
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
index bb6c793..408f423 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_prepare.py
@@ -21,13 +21,17 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os, json
+import sys
+import os
+import json
from fabric.api import *
-from dlab.ssn_lib import *
from Crypto.PublicKey import RSA
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
if __name__ == "__main__":
@@ -37,26 +41,33 @@
level=logging.DEBUG,
filename=local_log_filepath)
try:
- instance = 'ssn'
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
+ ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
logging.info('[DERIVING NAMES]')
print('[DERIVING NAMES]')
-
- ssn_conf = dict()
# Verify vpc deployment
- if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_vpc_name') == None and os.environ.get('azure_source_vpc_name') == None:
+ if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_vpc_name') \
+ and not os.environ.get('azure_source_vpc_name'):
raise Exception('Not possible to deploy private environment without predefined vpc or without source vpc')
- if os.environ['conf_network_type'] == 'private' and os.environ.get('azure_resource_group_name') == None and os.environ.get('azure_source_resource_group_name') == None:
- raise Exception('Not possible to deploy private environment without predefined resource_group_name or source_group_name')
- # We need to cut service_base_name to 12 symbols do to the Azure Name length limitation
- ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
+ if os.environ['conf_network_type'] == 'private' and not os.environ.get('azure_resource_group_name') \
+ and not os.environ.get('azure_source_resource_group_name'):
+ raise Exception('Not possible to deploy private environment without predefined resource_group_name '
+ 'or source_group_name')
+        # We need to cut service_base_name to 20 symbols due to the Azure Name length limitation
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'][:20], '-', True)
# Check azure predefined resources
- ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name', ssn_conf['service_base_name'])
- ssn_conf['source_resource_group_name'] = os.environ.get('azure_source_resource_group_name', ssn_conf['resource_group_name'])
+ ssn_conf['resource_group_name'] = os.environ.get('azure_resource_group_name',
+ '{}-resource-group'.format(ssn_conf['service_base_name']))
+ ssn_conf['source_resource_group_name'] = os.environ.get(
+ 'azure_source_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
ssn_conf['vpc_name'] = os.environ.get('azure_vpc_name', '{}-vpc'.format(ssn_conf['service_base_name']))
- ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-ssn-subnet'.format(ssn_conf['service_base_name']))
- ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name', '{}-sg'.format(ssn_conf['service_base_name']))
+ ssn_conf['subnet_name'] = os.environ.get('azure_subnet_name', '{}-subnet'.format(ssn_conf['service_base_name']))
+ ssn_conf['security_group_name'] = os.environ.get('azure_security_group_name',
+ '{}-sg'.format(ssn_conf['service_base_name']))
# Default variables
ssn_conf['region'] = os.environ['azure_region']
ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
@@ -69,10 +80,11 @@
if os.environ['conf_network_type'] == 'private':
ssn_conf['static_public_ip_name'] = 'None'
else:
- ssn_conf['static_public_ip_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
- key = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']), 'rb').read())
+ ssn_conf['static_public_ip_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+ ssn_conf['key'] = RSA.importKey(open('{}{}.pem'.format(os.environ['conf_key_dir'],
+ os.environ['conf_key_name']), 'rb').read())
ssn_conf['instance_storage_account_type'] = 'Premium_LRS'
- ssn_conf['public_ssh_key'] = key.publickey().exportKey("OpenSSH")
+ ssn_conf['public_ssh_key'] = ssn_conf['key'].publickey().exportKey("OpenSSH")
ssn_conf['instance_tags'] = {"Name": ssn_conf['instance_name'],
"SBN": ssn_conf['service_base_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
@@ -82,11 +94,11 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
ssn_conf['primary_disk_size'] = '32'
except Exception as err:
- print("Failed to generate variables dictionary." + str(err))
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
- if AzureMeta().get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
- print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+ if AzureMeta.get_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name']):
+ dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. Please try again.")
sys.exit(1)
try:
@@ -100,8 +112,7 @@
local("~/scripts/{}.py {}".format('ssn_create_resource_group', params))
except Exception as err:
traceback.print_exc()
- print('Error creating resource group: ' + str(err))
- append_result("Failed to create Resource Group. Exception: " + str(err))
+ dlab.fab.append_result("Failed to create Resource Group.", str(err))
sys.exit(1)
try:
@@ -116,13 +127,12 @@
local("~/scripts/{}.py {}".format('ssn_create_vpc', params))
except Exception as err:
traceback.print_exc()
- print('Error creating VPC: ' + str(err))
+ dlab.fab.append_result("Failed to create VPC.", str(err))
try:
if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
except Exception as err:
- print("Resources hasn't been removed: " + str(err))
- append_result("Failed to create VPC. Exception: " + str(err))
+ dlab.fab.append_result("Resources hasn't been removed.", str(err))
sys.exit(1)
try:
@@ -138,15 +148,15 @@
local("~/scripts/{}.py {}".format('common_create_subnet', params))
except Exception as err:
traceback.print_exc()
- print('Error creating Subnet: ' + str(err))
+ dlab.fab.append_result("Failed to create Subnet.", str(err))
try:
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
except Exception as err:
- print("Resources hasn't been removed: " + str(err))
- append_result("Failed to create Subnet. Exception: " + str(err))
+ print("Resources hasn't been removed: {}".format(str(err)))
+ dlab.fab.append_result("Resources hasn't been removed.", str(err))
sys.exit(1)
try:
@@ -154,20 +164,21 @@
logging.info('[CREATING VPC PEERING]')
print("[CREATING VPC PEERING]")
params = "--source_resource_group_name {} --destination_resource_group_name {} " \
- "--source_virtual_network_name {} --destination_virtual_network_name {}".format(ssn_conf['source_resource_group_name'],
- ssn_conf['resource_group_name'], os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
+ "--source_virtual_network_name {} --destination_virtual_network_name {}".format(
+ ssn_conf['source_resource_group_name'], ssn_conf['resource_group_name'],
+ os.environ['azure_source_vpc_name'], ssn_conf['vpc_name'])
local("~/scripts/{}.py {}".format('ssn_create_peering', params))
except Exception as err:
traceback.print_exc()
- print('Error creating VPC peering: ' + str(err))
try:
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
except Exception as err:
print("Resources hasn't been removed: " + str(err))
- append_result("Failed to create VPC peering. Exception: " + str(err))
+ dlab.fab.append_result("Resources hasn't been removed.", str(err))
+ dlab.fab.append_result("Failed to create VPC peering.", str(err))
sys.exit(1)
try:
@@ -229,18 +240,18 @@
local("~/scripts/{}.py {}".format('common_create_security_group', params))
except Exception as err:
traceback.print_exc()
- print('Error creating Security group: ' + str(err))
+ dlab.fab.append_result("Error creating Security group", str(err))
try:
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
- if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
if 'azure_subnet_name' not in os.environ:
- AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
- ssn_conf['subnet_name'])
+ AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+ ssn_conf['subnet_name'])
+ if 'azure_vpc_name' not in os.environ:
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
except Exception as err:
print("Resources hasn't been removed: " + str(err))
- append_result("Failed to create Security group. Exception: " + str(err))
+ dlab.fab.append_result("Resources hasn't been removed.", str(err))
sys.exit(1)
if os.environ['azure_datalake_enable'] == 'true':
@@ -269,20 +280,19 @@
raise Exception
except Exception as err:
traceback.print_exc()
- print('Error: {0}'.format(err))
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
- if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
- if 'azure_subnet_name' not in os.environ:
- AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
- ssn_conf['subnet_name'])
- if 'azure_security_group_name' not in os.environ:
- AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
- for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
+ dlab.fab.append_result("Failed to create Data Lake Store.", str(err))
+ for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
- append_result("Failed to create Data Lake Store. Exception:" + str(err))
+ AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+ if 'azure_security_group_name' not in os.environ:
+ AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+ if 'azure_subnet_name' not in os.environ:
+ AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+ ssn_conf['subnet_name'])
+ if 'azure_vpc_name' not in os.environ:
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
sys.exit(1)
if os.environ['conf_os_family'] == 'debian':
@@ -308,22 +318,22 @@
local("~/scripts/{}.py {}".format('common_create_instance', params))
except Exception as err:
traceback.print_exc()
- print('Error: {0}'.format(err))
- if 'azure_resource_group_name' not in os.environ:
- AzureActions().remove_resource_group(ssn_conf['service_base_name'], ssn_conf['region'])
- if 'azure_vpc_name' not in os.environ:
- AzureActions().remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
- if 'azure_subnet_name' not in os.environ:
- AzureActions().remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
- ssn_conf['subnet_name'])
- if 'azure_security_group_name' not in os.environ:
- AzureActions().remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
- for datalake in AzureMeta().list_datalakes(ssn_conf['resource_group_name']):
- if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
- AzureActions().delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+ dlab.fab.append_result("Failed to create instance.", str(err))
try:
- AzureActions().remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
+ AzureActions.remove_instance(ssn_conf['resource_group_name'], ssn_conf['instance_name'])
except:
print("The instance {} hasn't been created".format(ssn_conf['instance_name']))
- append_result("Failed to create instance. Exception:" + str(err))
- sys.exit(1)
\ No newline at end of file
+ for datalake in AzureMeta.list_datalakes(ssn_conf['resource_group_name']):
+ if ssn_conf['datalake_store_name'] == datalake.tags["Name"]:
+ AzureActions.delete_datalake_store(ssn_conf['resource_group_name'], datalake.name)
+ if 'azure_security_group_name' not in os.environ:
+ AzureActions.remove_security_group(ssn_conf['resource_group_name'], ssn_conf['security_group_name'])
+ if 'azure_subnet_name' not in os.environ:
+ AzureActions.remove_subnet(ssn_conf['resource_group_name'], ssn_conf['vpc_name'],
+ ssn_conf['subnet_name'])
+ if 'azure_vpc_name' not in os.environ:
+ AzureActions.remove_vpc(ssn_conf['resource_group_name'], ssn_conf['vpc_name'])
+ if 'azure_resource_group_name' not in os.environ:
+ AzureActions.remove_resource_group(ssn_conf['resource_group_name'], ssn_conf['region'])
+
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
index bf2f91e..c709929 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/ssn_terminate.py
@@ -21,111 +21,115 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
from fabric.api import *
-from dlab.ssn_lib import *
+import logging
+import traceback
+import json
def terminate_ssn_node(resource_group_name, service_base_name, vpc_name, region):
print("Terminating instances")
try:
- for vm in AzureMeta().compute_client.virtual_machines.list(resource_group_name):
+ for vm in AzureMeta.compute_client.virtual_machines.list(resource_group_name):
if service_base_name == vm.tags["SBN"]:
- AzureActions().remove_instance(resource_group_name, vm.name)
+ AzureActions.remove_instance(resource_group_name, vm.name)
print("Instance {} has been terminated".format(vm.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate instances", str(err))
sys.exit(1)
print("Removing network interfaces")
try:
- for network_interface in AzureMeta().list_network_interfaces(resource_group_name):
+ for network_interface in AzureMeta.list_network_interfaces(resource_group_name):
if service_base_name == network_interface.tags["SBN"]:
- AzureActions().delete_network_if(resource_group_name, network_interface.name)
+ AzureActions.delete_network_if(resource_group_name, network_interface.name)
print("Network interface {} has been removed".format(network_interface.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove network interfaces", str(err))
sys.exit(1)
print("Removing static public IPs")
try:
- for static_public_ip in AzureMeta().list_static_ips(resource_group_name):
+ for static_public_ip in AzureMeta.list_static_ips(resource_group_name):
if service_base_name == static_public_ip.tags["SBN"]:
- AzureActions().delete_static_public_ip(resource_group_name, static_public_ip.name)
+ AzureActions.delete_static_public_ip(resource_group_name, static_public_ip.name)
print("Static public IP {} has been removed".format(static_public_ip.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove static IPs", str(err))
sys.exit(1)
print("Removing disks")
try:
- for disk in AzureMeta().list_disks(resource_group_name):
+ for disk in AzureMeta.list_disks(resource_group_name):
if service_base_name == disk.tags["SBN"]:
- AzureActions().remove_disk(resource_group_name, disk.name)
+ AzureActions.remove_disk(resource_group_name, disk.name)
print("Disk {} has been removed".format(disk.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove disks", str(err))
sys.exit(1)
print("Removing storage accounts")
try:
- for storage_account in AzureMeta().list_storage_accounts(resource_group_name):
+ for storage_account in AzureMeta.list_storage_accounts(resource_group_name):
if service_base_name == storage_account.tags["SBN"]:
- AzureActions().remove_storage_account(resource_group_name, storage_account.name)
+ AzureActions.remove_storage_account(resource_group_name, storage_account.name)
print("Storage account {} has been terminated".format(storage_account.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove storage accounts", str(err))
sys.exit(1)
print("Removing Data Lake Store")
try:
- for datalake in AzureMeta().list_datalakes(resource_group_name):
+ for datalake in AzureMeta.list_datalakes(resource_group_name):
if service_base_name == datalake.tags["SBN"]:
- AzureActions().delete_datalake_store(resource_group_name, datalake.name)
+ AzureActions.delete_datalake_store(resource_group_name, datalake.name)
print("Data Lake Store {} has been terminated".format(datalake.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove Data Lake", str(err))
sys.exit(1)
print("Removing images")
try:
- for image in AzureMeta().list_images():
+ for image in AzureMeta.list_images():
if service_base_name == image.tags["SBN"]:
- AzureActions().remove_image(resource_group_name, image.name)
+ AzureActions.remove_image(resource_group_name, image.name)
print("Image {} has been removed".format(image.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove images", str(err))
sys.exit(1)
print("Removing security groups")
try:
- for sg in AzureMeta().network_client.network_security_groups.list(resource_group_name):
+ for sg in AzureMeta.network_client.network_security_groups.list(resource_group_name):
if service_base_name == sg.tags["SBN"]:
- AzureActions().remove_security_group(resource_group_name, sg.name)
+ AzureActions.remove_security_group(resource_group_name, sg.name)
print("Security group {} has been terminated".format(sg.name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove security groups", str(err))
sys.exit(1)
print("Removing VPC")
try:
- if AzureMeta().get_vpc(resource_group_name, service_base_name + '-vpc'):
- AzureActions().remove_vpc(resource_group_name, vpc_name)
+ if AzureMeta.get_vpc(resource_group_name, service_base_name + '-vpc'):
+ AzureActions.remove_vpc(resource_group_name, vpc_name)
print("VPC {} has been terminated".format(vpc_name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove VPC", str(err))
sys.exit(1)
print("Removing Resource Group")
try:
- if AzureMeta().get_resource_group(service_base_name):
- AzureActions().remove_resource_group(service_base_name, region)
+ if AzureMeta.get_resource_group(resource_group_name):
+ AzureActions.remove_resource_group(resource_group_name, region)
print("Resource group {} has been terminated".format(vpc_name))
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove resource group", str(err))
sys.exit(1)
@@ -136,12 +140,14 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
ssn_conf = dict()
- ssn_conf['service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].replace('_', '-')[:12], '-', True)
- ssn_conf['resource_group_name'] = replace_multi_symbols(
- os.environ['azure_resource_group_name'].replace('_', '-')[:12], '-', True)
+ ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(os.environ['conf_service_base_name'][:20],
+ '-', True)
+ ssn_conf['resource_group_name'] = os.environ.get(
+ 'azure_source_resource_group_name', '{}-resource-group'.format(ssn_conf['service_base_name']))
ssn_conf['region'] = os.environ['azure_region']
ssn_conf['vpc_name'] = os.environ['azure_vpc_name']
@@ -155,8 +161,7 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to terminate ssn.", str(err))
+ dlab.fab.append_result("Failed to terminate ssn.", str(err))
sys.exit(1)
try:
@@ -165,6 +170,6 @@
"Action": "Terminate ssn with all service_base_name environment"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
index 107f52d..914f686 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/tensor_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import traceback
+from fabric.api import *
if __name__ == "__main__":
@@ -41,33 +43,36 @@
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['user_keyname'] = os.environ['project_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
notebook_config['project_name'],
+ notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"project_tag": notebook_config['project_tag'],
@@ -79,7 +84,7 @@
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"endpoint_tag": notebook_config['endpoint_tag'],
@@ -87,7 +92,8 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -96,44 +102,45 @@
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'],
- notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
except Exception as err:
- append_result("Failed to generate variables dictionary", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +148,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -152,16 +158,16 @@
print('[CONFIGURE PROXY ON TENSOR INSTANCE]')
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
- .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ .format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -177,9 +183,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring TensorFlow and all dependencies
@@ -188,21 +193,20 @@
print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
params = "--hostname {0} --keyfile {1} " \
"--region {2} --os_user {3} " \
- "--ip_adress {4} --exploratory_name {5} --edge_ip {6}" \
+ "--ip_address {4} --exploratory_name {5} --edge_ip {6}" \
.format(instance_hostname, keyfile_name,
os.environ['azure_region'], notebook_config['dlab_ssh_user'],
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_tensor_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
- os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure TensorFlow.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -218,9 +222,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -231,12 +234,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -252,44 +254,45 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'],
+ notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
- notebook_config['instance_name'],
- os.environ['azure_region'],
- notebook_config['expected_image_name'],
- json.dumps(notebook_config['image_tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+ notebook_config['instance_name'],
+ os.environ['azure_region'],
+ notebook_config['expected_image_name'],
+ json.dumps(notebook_config['image_tags']))
print("Image was successfully created.")
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- notebook_config['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -314,18 +317,17 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
tensorboard_url = "http://" + ip_address + ":6006/"
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
@@ -374,7 +376,6 @@
]}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate output information.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to generate output information.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
index e73c023..91eb529 100644
--- a/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/azure/zeppelin_configure.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import traceback
+from fabric.api import *
if __name__ == "__main__":
@@ -40,33 +42,36 @@
level=logging.DEBUG,
filename=local_log_filepath)
try:
+ AzureMeta = dlab.meta_lib.AzureMeta()
+ AzureActions = dlab.actions_lib.AzureActions()
notebook_config = dict()
try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
+ notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['instance_size'] = os.environ['azure_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['user_keyname'] = os.environ['project_name']
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
- notebook_config['project_tag'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name']
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['user_keyname'] = notebook_config['project_name']
notebook_config['instance_name'] = '{}-{}-{}-nb-{}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
notebook_config['image_enabled'] = os.environ['conf_image_enabled']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
if notebook_config['shared_image_enabled'] == 'false':
notebook_config['expected_image_name'] = '{0}-{1}-{2}-{3}-notebook-image'.format(
notebook_config['service_base_name'],
- notebook_config['endpoint_name'],
notebook_config['project_name'],
+ notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"project_tag": notebook_config['project_tag'],
@@ -78,7 +83,7 @@
notebook_config['service_base_name'],
notebook_config['endpoint_name'],
os.environ['application'])
- notebook_config['image_tags'] = {"Name": notebook_config['instance_name'],
+ notebook_config['image_tags'] = {"Name": notebook_config['expected_image_name'],
"SBN": notebook_config['service_base_name'],
"User": notebook_config['user_name'],
"endpoint_tag": notebook_config['endpoint_tag'],
@@ -86,7 +91,8 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['security_group_name'] = '{}-{}-{}-nb-sg'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'])
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['tags'] = {"Name": notebook_config['instance_name'],
"SBN": notebook_config['service_base_name'],
@@ -94,46 +100,46 @@
"project_tag": notebook_config['project_tag'],
"endpoint_tag": notebook_config['endpoint_tag'],
"Exploratory": notebook_config['exploratory_name'],
- "product": "dlab"}
- notebook_config['ip_address'] = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ notebook_config['ip_address'] = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
# generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- edge_instance_private_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_private_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
+ notebook_config['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
+ os.environ['azure_region'])
if os.environ['conf_network_type'] == 'private':
- edge_instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
else:
- edge_instance_hostname = AzureMeta().get_instance_public_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_instance_hostname = notebook_config['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- edge_instance_name)
+ edge_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate variables dictionary.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem",
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -141,9 +147,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -160,9 +165,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result('Unable to configure proxy on zeppelin notebook. Exception: ' + str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
@@ -178,9 +182,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring zeppelin and all dependencies
@@ -199,25 +202,24 @@
"--zeppelin_version {10} --scala_version {11} " \
"--livy_version {12} --multiple_clusters {13} " \
"--r_mirror {14} --endpoint_url {15} " \
- "--ip_adress {16} --exploratory_name {17} --edge_ip {18} " \
+ "--ip_address {16} --exploratory_name {17} --edge_ip {18} " \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, os.environ['azure_region'],
- json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
- os.environ['notebook_hadoop_version'], edge_instance_private_hostname, '3128',
- os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
- os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
- os.environ['notebook_r_mirror'], 'null',
+ json.dumps(additional_config), notebook_config['dlab_ssh_user'],
+ os.environ['notebook_spark_version'], os.environ['notebook_hadoop_version'],
+ edge_instance_private_hostname, '3128', os.environ['notebook_zeppelin_version'],
+ os.environ['notebook_scala_version'], os.environ['notebook_livy_version'],
+ os.environ['notebook_multiple_clusters'], os.environ['notebook_r_mirror'], 'null',
notebook_config['ip_address'], notebook_config['exploratory_name'], edge_hostname)
try:
local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
- os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem")
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure zeppelin.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -233,9 +235,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -246,12 +247,11 @@
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -267,44 +267,45 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to post configuring instance.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to post configuring instance.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- image = AzureMeta().get_image(notebook_config['resource_group_name'], notebook_config['expected_image_name'])
+ image = AzureMeta.get_image(notebook_config['resource_group_name'],
+ notebook_config['expected_image_name'])
if image == '':
print("Looks like it's first time we configure notebook server. Creating image.")
- prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- AzureActions().create_image_from_instance(notebook_config['resource_group_name'],
- notebook_config['instance_name'],
- os.environ['azure_region'],
- notebook_config['expected_image_name'],
- json.dumps(notebook_config['image_tags']))
+ dlab.actions_lib.prepare_vm_for_image(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ AzureActions.create_image_from_instance(notebook_config['resource_group_name'],
+ notebook_config['instance_name'],
+ os.environ['azure_region'],
+ notebook_config['expected_image_name'],
+ json.dumps(notebook_config['image_tags']))
print("Image was successfully created.")
local("~/scripts/{}.py".format('common_prepare_notebook'))
instance_running = False
while not instance_running:
- if AzureMeta().get_instance_status(notebook_config['resource_group_name'],
- notebook_config['instance_name']) == 'running':
+ if AzureMeta.get_instance_status(notebook_config['resource_group_name'],
+ notebook_config['instance_name']) == 'running':
instance_running = True
- instance_hostname = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
- remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
- set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
- 'http://{}:3128'.format(edge_instance_private_hostname))
+ instance_hostname = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
+ dlab.actions_lib.remount_azure_disk(True, notebook_config['dlab_ssh_user'], instance_hostname,
+ keyfile_name)
+ dlab.fab.set_git_proxy(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name,
+ 'http://{}:3128'.format(edge_instance_private_hostname))
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], keyfile_name,
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
try:
@@ -329,18 +330,17 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
try:
- ip_address = AzureMeta().get_private_ip_address(notebook_config['resource_group_name'],
- notebook_config['instance_name'])
+ ip_address = AzureMeta.get_private_ip_address(notebook_config['resource_group_name'],
+ notebook_config['instance_name'])
zeppelin_ip_url = "http://" + ip_address + ":8080/"
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
@@ -379,7 +379,6 @@
]}
result.write(json.dumps(res))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to generate output information.", str(err))
- AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to generate output information.", str(err))
+ AzureActions.remove_instance(notebook_config['resource_group_name'], notebook_config['instance_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
index 311a805..f39c138 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine-service.py
@@ -24,11 +24,20 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+import traceback
import uuid
+from fabric.api import *
+
+
+def clear_resources():
+ GCPActions.delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
+ GCPActions.remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
+ os.environ['dataproc_version'], os.environ['conf_os_user'],
+ notebook_config['key_path'])
if __name__ == "__main__":
@@ -40,62 +49,72 @@
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
notebook_config['endpoint_name'])
- notebook_config['cluster_name'] = meta_lib.GCPMeta().get_not_configured_dataproc(notebook_config['notebook_name'])
- notebook_config['notebook_ip'] = meta_lib.GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+ notebook_config['cluster_name'] = GCPMeta.get_not_configured_dataproc(notebook_config['notebook_name'])
+ notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
notebook_config['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = meta_lib.GCPMeta().get_private_ip_address(edge_instance_name)
+ edge_instance_hostname = GCPMeta.get_private_ip_address(edge_instance_name)
if os.environ['application'] == 'deeplearning':
application = 'jupyter'
else:
application = os.environ['application']
+
+ additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "" ).replace(
+ "'}", "").lower()
+
notebook_config['cluster_labels'] = {
os.environ['notebook_instance_name']: "configured",
"name": notebook_config['cluster_name'],
"sbn": notebook_config['service_base_name'],
- "user": notebook_config['edge_user_name'],
"notebook_name": os.environ['notebook_instance_name'],
- "project_tag": notebook_config['project_tag'],
- "endpoint_tag": notebook_config['endpoint_tag'],
"product": "dlab",
- "computational_name": (os.environ['computational_name']).lower().replace('_', '-')
+ "computational_name": (os.environ['computational_name'].replace('_', '-').lower())
}
+ for tag in additional_tags.split(','):
+ label_key = tag.split(':')[0]
+ label_value = tag.split(':')[1].replace('_', '-')
+ if '@' in label_value:
+ label_value = label_value[:label_value.find('@')]
+ if label_value != '':
+ notebook_config['cluster_labels'].update({label_key: label_value})
+
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
- params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} --edge_user_name {} --project_name {} --os_user {} --edge_hostname {} --proxy_port {} --scala_version {} --application {} --pip_mirror {}" \
+ params = "--bucket {} --cluster_name {} --dataproc_version {} --keyfile {} --notebook_ip {} --region {} " \
+ "--edge_user_name {} --project_name {} --os_user {} --edge_hostname {} --proxy_port {} " \
+ "--scala_version {} --application {} --pip_mirror {}" \
.format(notebook_config['bucket_name'], notebook_config['cluster_name'], os.environ['dataproc_version'],
notebook_config['key_path'], notebook_config['notebook_ip'], os.environ['gcp_region'],
- notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'], edge_instance_hostname, '3128',
- os.environ['notebook_scala_version'], os.environ['application'], os.environ['conf_pypi_mirror'])
+ notebook_config['edge_user_name'], notebook_config['project_name'], os.environ['conf_os_user'],
+ edge_instance_hostname, '3128', os.environ['notebook_scala_version'], os.environ['application'],
+ os.environ['conf_pypi_mirror'])
try:
local("~/scripts/{}_{}.py {}".format(application, 'install_dataengine-service_kernels', params))
- actions_lib.GCPActions().update_dataproc_cluster(notebook_config['cluster_name'],
- notebook_config['cluster_labels'])
+ GCPActions.update_dataproc_cluster(notebook_config['cluster_name'], notebook_config['cluster_labels'])
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing Dataproc kernels.", str(err))
- actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
- actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
- os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+ clear_resources()
+ dlab.fab.append_result("Failed installing Dataproc kernels.", str(err))
sys.exit(1)
try:
@@ -113,11 +132,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure Spark.", str(err))
- actions_lib.GCPActions().delete_dataproc_cluster(notebook_config['cluster_name'], os.environ['gcp_region'])
- actions_lib.GCPActions().remove_kernels(notebook_config['notebook_name'], notebook_config['cluster_name'],
- os.environ['dataproc_version'], os.environ['conf_os_user'], notebook_config['key_path'])
+ dlab.fab.append_result("Failed to configure Spark.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -127,6 +143,7 @@
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
index adf1f0b..08c4c02 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_notebook_configure_dataengine.py
@@ -24,11 +24,20 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
+from fabric.api import *
+
+
+def clear_resources():
+ for i in range(notebook_config['instance_count'] - 1):
+ slave_name = notebook_config['slave_node_name'] + '{}'.format(i + 1)
+ GCPActions.remove_instance(slave_name, notebook_config['zone'])
+ GCPActions.remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
if __name__ == "__main__":
@@ -40,53 +49,53 @@
filename=local_log_filepath)
try:
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
- try:
- notebook_config['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
notebook_config['exploratory_name'] = ''
- try:
- notebook_config['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['region'] = os.environ['gcp_region']
notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- notebook_config['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
- '-de-' + notebook_config['exploratory_name'] + '-' + \
- notebook_config['computational_name']
+ notebook_config['user_name'] = os.environ['edge_user_name']
+ notebook_config['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+ notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ notebook_config['cluster_name'] = "{}-{}-{}-de-{}".format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['computational_name'])
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
- notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+ notebook_config['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
- notebook_config['spark_master_ip'] = GCPMeta().get_private_ip_address(notebook_config['master_node_name'])
- notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+ notebook_config['spark_master_ip'] = GCPMeta.get_private_ip_address(notebook_config['master_node_name'])
+ notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to get instance IP address", str(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, notebook_config['zone'])
- GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
- append_result("Failed to generate infrastructure names", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
- params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4} --keyfile {5}" \
- " --notebook_ip {6} --spark_master_ip {7}".\
+ params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
+ " --keyfile {5} --notebook_ip {6} --spark_master_ip {7}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'],
@@ -97,12 +106,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, notebook_config['zone'])
- GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
- append_result("Failed installing Dataengine kernels.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
@@ -122,12 +127,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(notebook_config['instance_count'] - 1):
- slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, notebook_config['zone'])
- GCPActions().remove_instance(notebook_config['master_node_name'], notebook_config['zone'])
- append_result("Failed to configure Spark.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
@@ -136,6 +137,7 @@
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
index 7f14dab..c83208b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_prepare_notebook.py
@@ -24,10 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
import os
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+from fabric.api import *
+
if __name__ == "__main__":
instance_class = 'notebook'
@@ -37,134 +40,159 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- print('Generating infrastructure names and tags')
- notebook_config = dict()
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['region'] = os.environ['gcp_region']
- notebook_config['zone'] = os.environ['gcp_zone']
-
- edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'],
- notebook_config['endpoint_tag']))
- if edge_status != 'RUNNING':
- logging.info('ERROR: Edge node is unavailable! Aborting...')
- print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = GCPMeta().get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
- ssn_hostname)
- append_result("Edge node is unavailable")
- sys.exit(1)
-
try:
- if os.environ['gcp_vpc_name'] == '':
- raise KeyError
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ notebook_config = dict()
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['region'] = os.environ['gcp_region']
+ notebook_config['zone'] = os.environ['gcp_zone']
+
+ edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_tag']))
+ if edge_status != 'RUNNING':
+ logging.info('ERROR: Edge node is unavailable! Aborting...')
+ print('ERROR: Edge node is unavailable! Aborting...')
+ ssn_hostname = GCPMeta.get_private_ip_address(notebook_config['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
+ sys.exit(1)
+
+ try:
+ if os.environ['gcp_vpc_name'] == '':
+ raise KeyError
+ else:
+ notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
+ except KeyError:
+ notebook_config['vpc_name'] = '{}-vpc'.format(notebook_config['service_base_name'])
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['subnet_name'] = '{0}-{1}-{2}-subnet'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_tag'])
+ notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['notebook_service_account_name'] = '{}-{}-{}-ps-sa'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+
+ if os.environ['conf_os_family'] == 'debian':
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
+ if os.environ['conf_os_family'] == 'redhat':
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(
+ os.environ['application'])
+ notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_tag'],
+ os.environ['application'])
else:
- notebook_config['vpc_name'] = os.environ['gcp_vpc_name']
- except KeyError:
- notebook_config['vpc_name'] = '{}-ssn-vpc'.format(notebook_config['service_base_name'])
- try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['subnet_name'] = '{0}-{1}-subnet'.format(notebook_config['service_base_name'],
- notebook_config['project_name'])
- notebook_config['instance_size'] = os.environ['gcp_notebook_instance_size']
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['notebook_service_account_name'] = '{}-{}-ps'.format(notebook_config['service_base_name'],
- notebook_config['project_name']).replace('_', '-')
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['notebook_primary_image_name'] = \
+ (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
+ else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
+ print('Searching pre-configured images')
+ notebook_config['primary_image_name'] = GCPMeta.get_image_by_name(
+ notebook_config['expected_primary_image_name'])
+ if notebook_config['primary_image_name'] == '':
+ notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+ else:
+ print('Pre-configured primary image found. Using: {}'.format(
+ notebook_config['primary_image_name'].get('name')))
+ notebook_config['primary_image_name'] = 'global/images/{}'.format(
+ notebook_config['primary_image_name'].get('name'))
- if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
- if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['primary_disk_size'] = (lambda x: '30' if x == 'deeplearning' else '12')(os.environ['application'])
- notebook_config['secondary_disk_size'] = os.environ['notebook_disk_size']
+ notebook_config['secondary_image_name'] = GCPMeta.get_image_by_name(
+ notebook_config['expected_secondary_image_name'])
+ if notebook_config['secondary_image_name'] == '':
+ notebook_config['secondary_image_name'] = 'None'
+ else:
+ print('Pre-configured secondary image found. Using: {}'.format(
+ notebook_config['secondary_image_name'].get('name')))
+ notebook_config['secondary_image_name'] = 'global/images/{}'.format(
+ notebook_config['secondary_image_name'].get('name'))
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
- else notebook_config['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
- print('Searching pre-configured images')
- notebook_config['primary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
- if notebook_config['primary_image_name'] == '':
- notebook_config['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
- else:
- print('Pre-configured primary image found. Using: {}'.format(notebook_config['primary_image_name'].get('name')))
- notebook_config['primary_image_name'] = 'global/images/{}'.format(notebook_config['primary_image_name'].get('name'))
+ notebook_config['gpu_accelerator_type'] = 'None'
- notebook_config['secondary_image_name'] = GCPMeta().get_image_by_name(notebook_config['expected_secondary_image_name'])
- if notebook_config['secondary_image_name'] == '':
- notebook_config['secondary_image_name'] = 'None'
- else:
- print('Pre-configured secondary image found. Using: {}'.format(notebook_config['secondary_image_name'].get('name')))
- notebook_config['secondary_image_name'] = 'global/images/{}'.format(notebook_config['secondary_image_name'].get('name'))
+ if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+ notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
- notebook_config['gpu_accelerator_type'] = 'None'
+ notebook_config['network_tag'] = '{0}-{1}-{2}-ps'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
- if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
- notebook_config['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+ with open('/root/result.json', 'w') as f:
+ data = {"notebook_name": notebook_config['instance_name'], "error": ""}
+ json.dump(data, f)
- notebook_config['network_tag'] = '{0}-{1}-ps'.format(notebook_config['service_base_name'],
- notebook_config['project_name'])
+ additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "" ).replace(
+ "'}", "").lower()
- with open('/root/result.json', 'w') as f:
- data = {"notebook_name": notebook_config['instance_name'], "error": ""}
- json.dump(data, f)
+ print('Additional tags will be added: {}'.format(additional_tags))
+ notebook_config['labels'] = {"name": notebook_config['instance_name'],
+ "sbn": notebook_config['service_base_name'],
+ "product": "dlab"
+ }
- additional_tags = os.environ['tags'].replace("': u'", ": ").replace("', u'", ", ").replace("{u'", "" ).replace("'}", "")
- print('Additional tags will be added: {}'.format(additional_tags))
-
- notebook_config['labels'] = {"name": notebook_config['instance_name'],
- "sbn": notebook_config['service_base_name'],
- "project_tag": notebook_config['project_tag'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "user": notebook_config['edge_user_name'],
- "product": "dlab",
- }
+ for tag in additional_tags.split(','):
+ label_key = tag.split(':')[0]
+ label_value = tag.split(':')[1].replace('_', '-')
+ if '@' in label_value:
+ label_value = label_value[:label_value.find('@')]
+ if label_value != '':
+ notebook_config['labels'].update({label_key: label_value})
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
# launching instance for notebook server
try:
logging.info('[CREATE NOTEBOOK INSTANCE]')
print('[CREATE NOTEBOOK INSTANCE]')
params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
"--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
- "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
- "--gpu_accelerator_type {14} --network_tag {15} --labels '{16}' --service_base_name {17}".\
+ "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+ "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --labels '{16}' " \
+ "--service_base_name {17}".\
format(notebook_config['instance_name'], notebook_config['region'], notebook_config['zone'],
notebook_config['vpc_name'], notebook_config['subnet_name'], notebook_config['instance_size'],
- notebook_config['ssh_key_path'], initial_user, notebook_config['notebook_service_account_name'],
- notebook_config['primary_image_name'], notebook_config['secondary_image_name'], 'notebook',
- notebook_config['primary_disk_size'], notebook_config['secondary_disk_size'],
- notebook_config['gpu_accelerator_type'], notebook_config['network_tag'],
- json.dumps(notebook_config['labels']), notebook_config['service_base_name'])
+ notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['notebook_service_account_name'], notebook_config['primary_image_name'],
+ notebook_config['secondary_image_name'], 'notebook', notebook_config['primary_disk_size'],
+ notebook_config['secondary_disk_size'], notebook_config['gpu_accelerator_type'],
+ notebook_config['network_tag'], json.dumps(notebook_config['labels']),
+ notebook_config['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create instance.", str(err))
- GCPActions().remove_disk(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to create instance.", str(err))
+ GCPActions.remove_disk(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
index b9c8a08..2d8fc8e 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_start_notebook.py
@@ -24,12 +24,14 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
import argparse
+from fabric.api import *
if __name__ == "__main__":
@@ -40,9 +42,11 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['zone'] = os.environ['gcp_zone']
@@ -51,10 +55,10 @@
print('[START NOTEBOOK]')
try:
print("Starting notebook")
- GCPActions().start_instance(notebook_config['notebook_name'], notebook_config['zone'])
+ GCPActions.start_instance(notebook_config['notebook_name'], notebook_config['zone'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to start notebook.", str(err))
+ dlab.fab.append_result("Failed to start notebook.", str(err))
raise Exception
except:
sys.exit(1)
@@ -62,7 +66,7 @@
try:
logging.info('[SETUP USER GIT CREDENTIALS]')
print('[SETUP USER GIT CREDENTIALS]')
- notebook_config['notebook_ip'] = GCPMeta().get_private_ip_address(notebook_config['notebook_name'])
+ notebook_config['notebook_ip'] = GCPMeta.get_private_ip_address(notebook_config['notebook_name'])
notebook_config['keyfile'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(os.environ['conf_os_user'], notebook_config['notebook_ip'], notebook_config['keyfile'])
@@ -70,7 +74,7 @@
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to setup git credentials.", str(err))
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
raise Exception
except:
sys.exit(1)
@@ -84,7 +88,7 @@
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
raise Exception
except:
sys.exit(1)
@@ -101,8 +105,6 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
index f336a0b..bcd431b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_stop_notebook.py
@@ -24,9 +24,9 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import uuid
import argparse
@@ -39,31 +39,31 @@
labels = [
{instance_name: '*'}
]
- clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+ clusters_list = GCPMeta.get_dataproc_list(labels)
if clusters_list:
for cluster_name in clusters_list:
- computational_name = meta_lib.GCPMeta().get_cluster(cluster_name).get('labels').get(
+ computational_name = GCPMeta.get_cluster(cluster_name).get('labels').get(
'computational_name')
- cluster = meta_lib.GCPMeta().get_list_cluster_statuses([cluster_name])
- actions_lib.GCPActions().bucket_cleanup(bucket_name, project_name, cluster_name)
+ cluster = GCPMeta.get_list_cluster_statuses([cluster_name])
+ GCPActions.bucket_cleanup(bucket_name, project_name, cluster_name)
print('The bucket {} has been cleaned successfully'.format(bucket_name))
- actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+ GCPActions.delete_dataproc_cluster(cluster_name, region)
print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
- actions_lib.GCPActions().remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
- key_path, computational_name)
+ GCPActions.remove_kernels(instance_name, cluster_name, cluster[0]['version'], ssh_user,
+ key_path, computational_name)
else:
print("There are no Dataproc clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataproc", str(err))
sys.exit(1)
print("Stopping data engine cluster")
try:
- clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+ clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
if clusters_list.get('items'):
for vm in clusters_list['items']:
try:
- GCPActions().stop_instance(vm['name'], zone)
+ GCPActions.stop_instance(vm['name'], zone)
print("Instance {} has been stopped".format(vm['name']))
except:
pass
@@ -71,15 +71,14 @@
print("There are no data engine clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to stop dataengine cluster", str(err))
sys.exit(1)
print("Stopping notebook")
try:
- GCPActions().stop_instance(instance_name, zone)
+ GCPActions.stop_instance(instance_name, zone)
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to stop notebook.", str(err))
+ dlab.fab.append_result("Failed to stop instance", str(err))
sys.exit(1)
@@ -92,12 +91,14 @@
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
@@ -115,7 +116,7 @@
notebook_config['project_name'])
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to stop notebook.", str(err))
+ dlab.fab.append_result("Failed to stop notebook.", str(err))
sys.exit(1)
try:
@@ -124,7 +125,6 @@
"Action": "Stop notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
index 4b243a0..00d39f5 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/common_terminate_notebook.py
@@ -24,9 +24,10 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
@@ -37,26 +38,26 @@
labels = [
{instance_name: '*'}
]
- clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+ clusters_list = GCPMeta.get_dataproc_list(labels)
if clusters_list:
for cluster_name in clusters_list:
- actions_lib.GCPActions().bucket_cleanup(bucket_name, user_name, cluster_name)
+ GCPActions.bucket_cleanup(bucket_name, user_name, cluster_name)
print('The bucket {} has been cleaned successfully'.format(bucket_name))
- actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+ GCPActions.delete_dataproc_cluster(cluster_name, region)
print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
else:
print("There are no Dataproc clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataproc", str(err))
sys.exit(1)
print("Terminating data engine cluster")
try:
- clusters_list = GCPMeta().get_list_instances_by_label(zone, instance_name)
+ clusters_list = GCPMeta.get_list_instances_by_label(zone, instance_name)
if clusters_list.get('items'):
for vm in clusters_list['items']:
try:
- GCPActions().remove_instance(vm['name'], zone)
+ GCPActions.remove_instance(vm['name'], zone)
print("Instance {} has been terminated".format(vm['name']))
except:
pass
@@ -64,15 +65,14 @@
print("There are no data engine clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataengine", str(err))
sys.exit(1)
print("Terminating notebook")
try:
- GCPActions().remove_instance(instance_name, zone)
+ GCPActions.remove_instance(instance_name, zone)
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to terminate notebook.", str(err))
+ dlab.fab.append_result("Failed to terminate instance", str(err))
sys.exit(1)
@@ -84,12 +84,14 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
notebook_config = dict()
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['bucket_name'] = '{0}-{1}-{2}-bucket'.format(notebook_config['service_base_name'],
notebook_config['project_name'],
@@ -106,7 +108,7 @@
notebook_config['project_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate notebook.", str(err))
+ dlab.fab.append_result("Failed to terminate notebook.", str(err))
raise Exception
except:
sys.exit(1)
@@ -117,6 +119,6 @@
"Action": "Terminate notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
index 30f9a80..05b9c9b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_configure.py
@@ -24,10 +24,11 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
-from dlab.notebook_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.notebook_lib
+import traceback
import sys
import os
import logging
@@ -35,7 +36,7 @@
def configure_dataengine_service(instance, dataproc_conf):
- dataproc_conf['instance_ip'] = meta_lib.GCPMeta().get_private_ip_address(instance)
+ dataproc_conf['instance_ip'] = GCPMeta.get_private_ip_address(instance)
# configuring proxy on Data Engine service
try:
logging.info('[CONFIGURE PROXY ON DATAENGINE SERVICE]')
@@ -50,9 +51,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
sys.exit(1)
try:
@@ -62,16 +62,15 @@
env['connection_attempts'] = 100
env.key_filename = "{}".format(dataproc_conf['key_path'])
env.host_string = dataproc_conf['dlab_ssh_user'] + '@' + dataproc_conf['instance_ip']
- install_os_pkg(['python-pip', 'python3-pip'])
- configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
- dataproc_conf['key_path'])
+ dlab.notebook_lib.install_os_pkg(['python-pip', 'python3-pip'])
+ dlab.fab.configure_data_engine_service_pip(dataproc_conf['instance_ip'], dataproc_conf['dlab_ssh_user'],
+ dataproc_conf['key_path'])
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure dataengine service.", str(err))
- actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+ dlab.fab.append_result("Failed to configure dataengine service.", str(err))
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
sys.exit(1)
try:
@@ -79,7 +78,7 @@
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
slaves = []
for idx, instance in enumerate(dataproc_conf['cluster_core_instances']):
- slave_ip = meta_lib.GCPMeta().get_private_ip_address(instance)
+ slave_ip = GCPMeta.get_private_ip_address(instance)
slave = {
'name': 'datanode{}'.format(idx + 1),
'ip': slave_ip,
@@ -108,12 +107,11 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure reverse proxy.", str(err))
- actions_lib.GCPActions().delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+ dlab.fab.append_result("Failed to configure reverse proxy.", str(err))
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
sys.exit(1)
@@ -124,55 +122,66 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
- print('Generating infrastructure names and tags')
- dataproc_conf = dict()
try:
- dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- dataproc_conf['exploratory_name'] = ''
- try:
- dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
- except:
- dataproc_conf['computational_name'] = ''
- dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- dataproc_conf['key_name'] = os.environ['conf_key_name']
- dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- dataproc_conf['region'] = os.environ['gcp_region']
- dataproc_conf['zone'] = os.environ['gcp_zone']
- dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'])
- dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'],
- dataproc_conf['exploratory_name'],
- dataproc_conf['computational_name'])
- dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'])
- dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ dataproc_conf = dict()
+ if 'exploratory_name' in os.environ:
+ dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
+ dataproc_conf['exploratory_name'] = ''
+ if 'computational_name' in os.environ:
+ dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
+ dataproc_conf['computational_name'] = ''
+ dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+ dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ dataproc_conf['key_name'] = os.environ['conf_key_name']
+ dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ dataproc_conf['region'] = os.environ['gcp_region']
+ dataproc_conf['zone'] = os.environ['gcp_zone']
+ dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'],
+ dataproc_conf['computational_name'])
+ dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
dataproc_conf['project_name'],
dataproc_conf['endpoint_name'])
- dataproc_conf['release_label'] = os.environ['dataproc_version']
- dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
- dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'])
- dataproc_conf['dataproc_unique_index'] = GCPMeta().get_index_by_service_account_name(dataproc_conf['dataproc_service_account_name'])
- service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
- dataproc_conf['dataproc_unique_index'],
- os.environ['gcp_project_id'])
+ dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['release_label'] = os.environ['dataproc_version']
+ dataproc_conf['cluster_label'] = {os.environ['notebook_instance_name']: "not-configured"}
+ dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+ dataproc_conf['dataproc_service_account_name'])
+ service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+ dataproc_conf['dataproc_unique_index'],
+ os.environ['gcp_project_id'])
- dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'],
- dataproc_conf['endpoint_name'])
- dataproc_conf['edge_instance_hostname'] = GCPMeta().get_instance_public_ip_by_name(
- dataproc_conf['edge_instance_name'])
- dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
- dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
- dataproc_conf['master_ip'] = meta_lib.GCPMeta().get_private_ip_address(dataproc_conf['master_name'])
+ dataproc_conf['edge_instance_name'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['edge_instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(
+ dataproc_conf['edge_instance_name'])
+ dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ dataproc_conf['master_name'] = dataproc_conf['cluster_name'] + '-m'
+ dataproc_conf['master_ip'] = GCPMeta.get_private_ip_address(dataproc_conf['master_name'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+ sys.exit(1)
try:
- res = meta_lib.GCPMeta().get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
+ res = GCPMeta.get_list_instances(os.environ['gcp_zone'], dataproc_conf['cluster_name'])
dataproc_conf['cluster_instances'] = [i.get('name') for i in res['items']]
except Exception as err:
traceback.print_exc()
@@ -194,7 +203,9 @@
for job in jobs:
if job.exitcode != 0:
raise Exception
- except:
+ except Exception as err:
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
+ dlab.fab.append_result("Failed to configure Dataengine-service", str(err))
traceback.print_exc()
raise Exception
@@ -230,6 +241,7 @@
}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ GCPActions.delete_dataproc_cluster(dataproc_conf['cluster_name'], os.environ['gcp_region'])
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
index 004a1c9..7b9d05a 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_create.py
@@ -57,7 +57,7 @@
job_body['job']['placement']['clusterName'] = cluster_name
job_body['job']['pysparkJob']['mainPythonFileUri'] = 'gs://{}/jars_parser.py'.format(args.bucket)
job_body['job']['pysparkJob']['args'][1] = args.bucket
- job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).lower().replace('_', '-')
+ job_body['job']['pysparkJob']['args'][3] = (os.environ['project_name']).replace('_', '-').lower()
job_body['job']['pysparkJob']['args'][5] = cluster_name
job_body['job']['pysparkJob']['args'][7] = cluster_version
job_body['job']['pysparkJob']['args'][9] = os.environ['conf_os_user']
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
index 316c9b2..993b8e7 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -42,80 +43,99 @@
level=logging.INFO,
filename=local_log_filepath)
try:
- os.environ['exploratory_name']
- except:
- os.environ['exploratory_name'] = ''
- if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
- time.sleep(30)
-
- print('Generating infrastructure names and tags')
- dataproc_conf = dict()
- try:
- dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- dataproc_conf['exploratory_name'] = ''
- try:
- dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
- except:
- dataproc_conf['computational_name'] = ''
- dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- dataproc_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- dataproc_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- dataproc_conf['key_name'] = os.environ['conf_key_name']
- dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- dataproc_conf['region'] = os.environ['gcp_region']
- dataproc_conf['zone'] = os.environ['gcp_zone']
- dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
- dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'],
- dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])
- dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['project_name'])
- dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ dataproc_conf = dict()
+ if 'exploratory_name' in os.environ:
+ dataproc_conf['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
+ dataproc_conf['exploratory_name'] = ''
+ if 'computational_name' in os.environ:
+ dataproc_conf['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
+ dataproc_conf['computational_name'] = ''
+ if os.path.exists('/response/.dataproc_creating_{}'.format(dataproc_conf['exploratory_name'])):
+ time.sleep(30)
+ dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+ dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ dataproc_conf['project_tag'] = dataproc_conf['project_name']
+ dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ dataproc_conf['endpoint_tag'] = dataproc_conf['endpoint_name']
+ dataproc_conf['key_name'] = os.environ['conf_key_name']
+ dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ dataproc_conf['region'] = os.environ['gcp_region']
+ dataproc_conf['zone'] = os.environ['gcp_zone']
+ dataproc_conf['subnet'] = '{0}-{1}-{2}-subnet'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['cluster_name'] = '{0}-{1}-{2}-des-{3}'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'],
+ dataproc_conf['computational_name'])
+ dataproc_conf['cluster_tag'] = '{0}-{1}-{2}-ps'.format(dataproc_conf['service_base_name'],
dataproc_conf['project_name'],
dataproc_conf['endpoint_name'])
- dataproc_conf['release_label'] = os.environ['dataproc_version']
- dataproc_conf['cluster_labels'] = {
- os.environ['notebook_instance_name']: "not-configured",
- "name": dataproc_conf['cluster_name'],
- "sbn": dataproc_conf['service_base_name'],
- "user": dataproc_conf['edge_user_name'],
- "project_tag": dataproc_conf['project_tag'],
- "endpoint_tag": dataproc_conf['endpoint_tag'],
- "notebook_name": os.environ['notebook_instance_name'],
- "product": "dlab",
- "computational_name": dataproc_conf['computational_name']
- }
- dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'])
- dataproc_conf['dataproc_unique_index'] = GCPMeta().get_index_by_service_account_name(dataproc_conf['dataproc_service_account_name'])
- service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
- dataproc_conf['dataproc_unique_index'],
- os.environ['gcp_project_id'])
- dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
- dataproc_conf['project_name'],
- dataproc_conf['endpoint_name'])
- dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ dataproc_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['release_label'] = os.environ['dataproc_version']
+ additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+ "'}", "").lower()
- edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
+ dataproc_conf['cluster_labels'] = {
+ os.environ['notebook_instance_name']: "not-configured",
+ "name": dataproc_conf['cluster_name'],
+ "sbn": dataproc_conf['service_base_name'],
+ "notebook_name": os.environ['notebook_instance_name'],
+ "product": "dlab",
+ "computational_name": dataproc_conf['computational_name']
+ }
+
+ for tag in additional_tags.split(','):
+ label_key = tag.split(':')[0]
+ label_value = tag.split(':')[1].replace('_', '-')
+ if '@' in label_value:
+ label_value = label_value[:label_value.find('@')]
+ if label_value != '':
+ dataproc_conf['cluster_labels'].update({label_key: label_value})
+ dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-{2}-ps-sa'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['dataproc_unique_index'] = GCPMeta.get_index_by_service_account_name(
+ dataproc_conf['dataproc_service_account_name'])
+ service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(dataproc_conf['service_base_name'],
+ dataproc_conf['dataproc_unique_index'],
+ os.environ['gcp_project_id'])
+ dataproc_conf['edge_instance_hostname'] = '{0}-{1}-{2}-edge'.format(dataproc_conf['service_base_name'],
+ dataproc_conf['project_name'],
+ dataproc_conf['endpoint_name'])
+ dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
+
+ edge_status = GCPMeta.get_instance_status(dataproc_conf['edge_instance_hostname'])
if edge_status != 'RUNNING':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
- append_result("Edge node is unavailable")
+ ssn_hostname = GCPMeta.get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
sys.exit(1)
- print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+    print("Will create exploratory environment with edge node as access point as following: {}".format(
+ json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
logging.info(json.dumps(dataproc_conf))
try:
- meta_lib.GCPMeta().dataproc_waiter(dataproc_conf['cluster_labels'])
+ GCPMeta.dataproc_waiter(dataproc_conf['cluster_labels'])
local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
except Exception as err:
traceback.print_exc()
- append_result("Dataproc waiter fail.", str(err))
+ dlab.fab.append_result("Dataproc waiter fail.", str(err))
sys.exit(1)
local("echo Waiting for changes to propagate; sleep 10")
@@ -133,14 +153,16 @@
dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
if int(os.environ['dataproc_preemptible_count']) != 0:
- dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
+ dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(
+ os.environ['dataproc_preemptible_count'])
else:
del dataproc_cluster['config']['secondaryWorkerConfig']
dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
- ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['project_name'] + '.pub').read()
+ ssh_user_pubkey = open('{}{}.pub'.format(os.environ['conf_key_dir'], dataproc_conf['project_name'])).read()
key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
- dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
+ dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(
+ dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']
with open('/root/result.json', 'w') as f:
data = {"hostname": dataproc_conf['cluster_name'], "error": ""}
@@ -149,7 +171,9 @@
try:
logging.info('[Creating Dataproc Cluster]')
print('[Creating Dataproc Cluster]')
- params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))
+ params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'],
+ dataproc_conf['bucket_name'],
+ json.dumps(dataproc_cluster))
try:
local("~/scripts/{}.py {}".format('dataengine-service_create', params))
@@ -160,7 +184,6 @@
keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create Dataproc Cluster.", str(err))
+ dlab.fab.append_result("Failed to create Dataproc Cluster.", str(err))
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
index 4247234..3710b1c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_terminate.py
@@ -21,31 +21,34 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import traceback
import boto3
import argparse
import sys
+import json
import os
def terminate_dataproc_cluster(notebook_name, dataproc_name, bucket_name, ssh_user, key_path):
print('Terminating Dataproc cluster and cleaning Dataproc config from bucket')
try:
- cluster = meta_lib.GCPMeta().get_list_cluster_statuses([dataproc_name])
+ cluster = GCPMeta.get_list_cluster_statuses([dataproc_name])
if cluster[0]['status'] == 'running':
- computational_name = meta_lib.GCPMeta().get_cluster(dataproc_name).get('labels').get('computational_name')
- actions_lib.GCPActions().bucket_cleanup(bucket_name, os.environ['project_name'], dataproc_name)
+ computational_name = GCPMeta.get_cluster(dataproc_name).get('labels').get('computational_name')
+ GCPActions.bucket_cleanup(bucket_name, dataproc_conf['project_name'], dataproc_name)
print('The bucket {} has been cleaned successfully'.format(bucket_name))
- actions_lib.GCPActions().delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
+ GCPActions.delete_dataproc_cluster(dataproc_name, os.environ['gcp_region'])
print('The Dataproc cluster {} has been terminated successfully'.format(dataproc_name))
- actions_lib.GCPActions().remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
+ GCPActions.remove_kernels(notebook_name, dataproc_name, cluster[0]['version'], ssh_user,
key_path, computational_name)
else:
print("There are no Dataproc clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
sys.exit(1)
@@ -58,12 +61,14 @@
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
dataproc_conf = dict()
dataproc_conf['service_base_name'] = os.environ['conf_service_base_name']
- dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- dataproc_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ dataproc_conf['edge_user_name'] = (os.environ['edge_user_name'])
+ dataproc_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ dataproc_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
dataproc_conf['dataproc_name'] = os.environ['dataproc_cluster_name']
dataproc_conf['gcp_project_id'] = os.environ['gcp_project_id']
dataproc_conf['gcp_region'] = os.environ['gcp_region']
@@ -79,13 +84,13 @@
print('[TERMINATE DATAPROC CLUSTER]')
try:
terminate_dataproc_cluster(dataproc_conf['notebook_name'], dataproc_conf['dataproc_name'],
- dataproc_conf['bucket_name'], os.environ['conf_os_user'], dataproc_conf['key_path'])
+ dataproc_conf['bucket_name'], os.environ['conf_os_user'],
+ dataproc_conf['key_path'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate Dataproc cluster.", str(err))
+ dlab.fab.append_result("Failed to terminate Dataproc cluster.", str(err))
raise Exception
- except Exception as err:
- print('Error: {0}'.format(err))
+ except:
sys.exit(1)
try:
@@ -96,6 +101,6 @@
"Action": "Terminate Dataproc cluster"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
index bbbd6de..d50e0f0 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_configure.py
@@ -24,9 +24,10 @@
import json
import time
from fabric.api import *
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import sys
import os
import uuid
@@ -37,7 +38,7 @@
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
- slave_hostname = GCPMeta().get_private_ip_address(slave_name)
+ slave_hostname = GCPMeta.get_private_ip_address(slave_name)
try:
logging.info('[CREATING DLAB SSH USER ON SLAVE NODE]')
print('[CREATING DLAB SSH USER ON SLAVE NODE]')
@@ -51,18 +52,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to create ssh user on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE NODE]')
logging.info('[INSTALLING USERs KEY ON SLAVE NODE]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
@@ -70,15 +67,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to install ssh user key on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install ssh user key on slave.", str(err))
sys.exit(1)
try:
@@ -94,12 +87,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to configure proxy on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
@@ -114,13 +103,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to install prerequisites on slave.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
@@ -138,16 +122,18 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed configuring slave node", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to configure slave node.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
+def clear_resources():
+ for i in range(data_engine['instance_count'] - 1):
+ slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
+ GCPActions.remove_instance(slave_name, data_engine['zone'])
+ GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
@@ -157,12 +143,15 @@
filename=local_log_filepath)
try:
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- data_engine['endpoint_tag'] = os.environ['endpoint_name'].lower().replace('_', '-')
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+ data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+ data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ data_engine['endpoint_tag'] = data_engine['endpoint_name']
data_engine['region'] = os.environ['gcp_region']
data_engine['zone'] = os.environ['gcp_zone']
try:
@@ -171,24 +160,26 @@
else:
data_engine['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
- data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
- try:
- data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
+ data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
data_engine['computational_name'] = ''
- data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
- data_engine['project_name'])
+ data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], data_engine['key_name'])
- data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
- data_engine['project_name'])
+ data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
@@ -196,9 +187,10 @@
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
- data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
- '-de-' + data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
@@ -206,23 +198,19 @@
data_engine['gpu_accelerator_type'] = 'None'
if os.environ['application'] in ('tensor', 'deeplearning'):
data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
- data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- master_node_hostname = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+ data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
+ master_node_hostname = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'], data_engine['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
data_engine['dlab_ssh_user'] = os.environ['conf_os_user']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- print("Failed to generate variables dictionary.")
- append_result("Failed to generate variables dictionary.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
@@ -238,33 +226,26 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to create ssh user on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER NODE]')
logging.info('[INSTALLING USERs KEY ON MASTER NODE]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(additional_config), data_engine['dlab_ssh_user'])
+ master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem",
+ json.dumps(additional_config), data_engine['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to install ssh user on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install ssh user on master.", str(err))
sys.exit(1)
try:
@@ -280,12 +261,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to configure proxy on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
@@ -300,13 +277,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to install prerequisites on master.", str(err))
+ clear_resources()
+ dlab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
@@ -324,12 +296,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure master node", str(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+ dlab.fab.append_result("Failed to configure master node", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -344,17 +312,14 @@
if job.exitcode != 0:
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+ dlab.fab.append_result("Failed to configure slave nodes", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
- notebook_instance_ip = GCPMeta().get_private_ip_address(data_engine['notebook_name'])
+ notebook_instance_ip = GCPMeta.get_private_ip_address(data_engine['notebook_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
@@ -379,18 +344,15 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- for i in range(data_engine['instance_count'] - 1):
- slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- GCPActions().remove_instance(slave_name, data_engine['zone'])
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+ dlab.fab.append_result("Failed to configure reverse proxy", str(err))
+ clear_resources()
sys.exit(1)
try:
- ip_address = GCPMeta().get_private_ip_address(data_engine['master_node_name'])
+ ip_address = GCPMeta.get_private_ip_address(data_engine['master_node_name'])
spark_master_url = "http://" + ip_address + ":8080"
spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
@@ -416,6 +378,7 @@
}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
index 2776834..262868c 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_prepare.py
@@ -24,11 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import argparse
+from fabric.api import *
if __name__ == "__main__":
instance_class = 'notebook'
@@ -38,135 +40,152 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- print('Generating infrastructure names and tags')
- data_engine = dict()
- data_engine['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- data_engine['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- data_engine['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- data_engine['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- data_engine['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- data_engine['region'] = os.environ['gcp_region']
- data_engine['zone'] = os.environ['gcp_zone']
- data_engine['endpoint_name'] = os.environ['endpoint_name']
-
- edge_status = GCPMeta().get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
- data_engine['project_name'],
- data_engine['endpoint_name']))
- if edge_status != 'RUNNING':
- logging.info('ERROR: Edge node is unavailable! Aborting...')
- print('ERROR: Edge node is unavailable! Aborting...')
- ssn_hostname = GCPMeta().get_private_ip_address(data_engine['service_base_name'] + '-ssn')
- put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
- ssn_hostname)
- append_result("Edge node is unavailable")
- sys.exit(1)
-
try:
- if os.environ['gcp_vpc_name'] == '':
- raise KeyError
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ data_engine = dict()
+ data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
+ data_engine['edge_user_name'] = (os.environ['edge_user_name'])
+ data_engine['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ data_engine['project_tag'] = data_engine['project_name']
+ data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ data_engine['endpoint_tag'] = data_engine['endpoint_name']
+ data_engine['region'] = os.environ['gcp_region']
+ data_engine['zone'] = os.environ['gcp_zone']
+
+ edge_status = GCPMeta.get_instance_status('{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name']))
+ if edge_status != 'RUNNING':
+ logging.info('ERROR: Edge node is unavailable! Aborting...')
+ print('ERROR: Edge node is unavailable! Aborting...')
+ ssn_hostname = GCPMeta.get_private_ip_address(data_engine['service_base_name'] + '-ssn')
+ dlab.fab.put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
+ ssn_hostname)
+ dlab.fab.append_result("Edge node is unavailable")
+ sys.exit(1)
+
+ try:
+ if os.environ['gcp_vpc_name'] == '':
+ raise KeyError
+ else:
+ data_engine['vpc_name'] = os.environ['gcp_vpc_name']
+ except KeyError:
+ data_engine['vpc_name'] = '{}-vpc'.format(data_engine['service_base_name'])
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
else:
- data_engine['vpc_name'] = os.environ['gcp_vpc_name']
- except KeyError:
- data_engine['vpc_name'] = '{}-ssn-vpc'.format(data_engine['service_base_name'])
- try:
- data_engine['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
- data_engine['computational_name'] = ''
+ data_engine['exploratory_name'] = ''
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
+ data_engine['computational_name'] = ''
- data_engine['subnet_name'] = '{0}-{1}-subnet'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
- data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
- data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- data_engine['dataengine_service_account_name'] = '{}-{}-ps'.format(data_engine['service_base_name'],
- data_engine['project_name'])
+ data_engine['subnet_name'] = '{0}-{1}-{2}-subnet'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
+ data_engine['master_size'] = os.environ['gcp_dataengine_master_size']
+ data_engine['slave_size'] = os.environ['gcp_dataengine_slave_size']
+ data_engine['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ data_engine['dataengine_service_account_name'] = '{}-{}-{}-ps-sa'.format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'])
- if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
- if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
- data_engine['cluster_name'] = data_engine['service_base_name'] + '-' + data_engine['project_name'] + \
- '-de-' + data_engine['exploratory_name'] + '-' + \
- data_engine['computational_name']
- data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
- data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
- data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
- data_engine['notebook_name'] = os.environ['notebook_instance_name']
+ if os.environ['conf_os_family'] == 'debian':
+ initial_user = 'ubuntu'
+ sudo_group = 'sudo'
+ if os.environ['conf_os_family'] == 'redhat':
+ initial_user = 'ec2-user'
+ sudo_group = 'wheel'
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
+ data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
+ data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
+ data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
+ data_engine['notebook_name'] = os.environ['notebook_instance_name']
- data_engine['primary_disk_size'] = '30'
- data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
+ data_engine['primary_disk_size'] = '30'
+ data_engine['secondary_disk_size'] = os.environ['notebook_disk_size']
- data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if data_engine['shared_image_enabled'] == 'false':
- data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
- os.environ['application'])
- data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- data_engine['service_base_name'], data_engine['endpoint_tag'], data_engine['project_name'],
- os.environ['application'])
- else:
- data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
- data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
- data_engine['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
- else data_engine['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
- print('Searching pre-configured images')
- data_engine['primary_image_name'] = GCPMeta().get_image_by_name(data_engine['notebook_primary_image_name'])
- if data_engine['primary_image_name'] == '':
- data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
- else:
- print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
- data_engine['primary_image_name'] = 'global/images/{}'.format(
- data_engine['primary_image_name'].get('name'))
+ data_engine['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if data_engine['shared_image_enabled'] == 'false':
+ data_engine['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+ os.environ['application'])
+ data_engine['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_tag'],
+ os.environ['application'])
+ else:
+ data_engine['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+ data_engine['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ data_engine['service_base_name'], data_engine['endpoint_tag'], os.environ['application'])
+ data_engine['notebook_primary_image_name'] = (lambda x: os.environ['notebook_primary_image_name'] if x != 'None'
+ else data_engine['expected_primary_image_name'])(str(os.environ.get('notebook_primary_image_name')))
+ print('Searching pre-configured images')
+ data_engine['primary_image_name'] = GCPMeta.get_image_by_name(data_engine['notebook_primary_image_name'])
+ if data_engine['primary_image_name'] == '':
+ data_engine['primary_image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+ else:
+ print('Pre-configured primary image found. Using: {}'.format(data_engine['primary_image_name'].get('name')))
+ data_engine['primary_image_name'] = 'global/images/{}'.format(
+ data_engine['primary_image_name'].get('name'))
- data_engine['secondary_image_name'] = GCPMeta().get_image_by_name(data_engine['expected_secondary_image_name'])
- if data_engine['secondary_image_name'] == '':
- data_engine['secondary_image_name'] = 'None'
- else:
- print('Pre-configured secondary image found. Using: {}'.format(data_engine['secondary_image_name'].get('name')))
- data_engine['secondary_image_name'] = 'global/images/{}'.format(data_engine['secondary_image_name'].get('name'))
+ data_engine['secondary_image_name'] = GCPMeta.get_image_by_name(data_engine['expected_secondary_image_name'])
+ if data_engine['secondary_image_name'] == '':
+ data_engine['secondary_image_name'] = 'None'
+ else:
+ print('Pre-configured secondary image found. Using: {}'.format(
+ data_engine['secondary_image_name'].get('name')))
+ data_engine['secondary_image_name'] = 'global/images/{}'.format(
+ data_engine['secondary_image_name'].get('name'))
- with open('/root/result.json', 'w') as f:
- data = {"hostname": data_engine['cluster_name'], "error": ""}
- json.dump(data, f)
+ with open('/root/result.json', 'w') as f:
+ data = {"hostname": data_engine['cluster_name'], "error": ""}
+ json.dump(data, f)
- data_engine['gpu_accelerator_type'] = 'None'
- if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
- data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
- data_engine['network_tag'] = '{0}-{1}-ps'.format(data_engine['service_base_name'],
- data_engine['project_name'])
- data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
- "sbn": data_engine['service_base_name'],
- "user": data_engine['edge_user_name'],
- "project_tag": data_engine['project_tag'],
- "endpoint_tag": data_engine['endpoint_tag'],
- "type": "slave",
- "notebook_name": data_engine['notebook_name'],
- "product": "dlab"}
- data_engine['master_labels'] = {"name": data_engine['cluster_name'],
- "sbn": data_engine['service_base_name'],
- "user": data_engine['edge_user_name'],
- "project_tag": data_engine['project_tag'],
- "endpoint_tag": data_engine['endpoint_tag'],
- "type": "master",
- "notebook_name": data_engine['notebook_name'],
- "product": "dlab"}
+ data_engine['gpu_accelerator_type'] = 'None'
+ if os.environ['application'] in ('tensor', 'tensor-rstudio', 'deeplearning'):
+ data_engine['gpu_accelerator_type'] = os.environ['gcp_gpu_accelerator_type']
+ data_engine['network_tag'] = '{0}-{1}-{2}-ps'.format(data_engine['service_base_name'],
+ data_engine['project_name'], data_engine['endpoint_name'])
+ additional_tags = os.environ['tags'].replace("': u'", ":").replace("', u'", ",").replace("{u'", "").replace(
+ "'}", "").lower()
+
+ data_engine['slave_labels'] = {"name": data_engine['cluster_name'],
+ "sbn": data_engine['service_base_name'],
+ "type": "slave",
+ "notebook_name": data_engine['notebook_name'],
+ "product": "dlab"}
+ data_engine['master_labels'] = {"name": data_engine['cluster_name'],
+ "sbn": data_engine['service_base_name'],
+ "type": "master",
+ "notebook_name": data_engine['notebook_name'],
+ "product": "dlab"}
+
+ for tag in additional_tags.split(','):
+ label_key = tag.split(':')[0]
+ label_value = tag.split(':')[1].replace('_', '-')
+ if '@' in label_value:
+ label_value = label_value[:label_value.find('@')]
+ if label_value != '':
+ data_engine['slave_labels'].update({label_key: label_value})
+ data_engine['master_labels'].update({label_key: label_value})
+ except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
try:
logging.info('[CREATE MASTER NODE]')
print('[CREATE MASTER NODE]')
params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
"--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
- "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
- "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}' --service_base_name {18}". \
+ "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+ "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+ "--labels '{17}' --service_base_name {18}". \
format(data_engine['master_node_name'], data_engine['region'], data_engine['zone'], data_engine['vpc_name'],
data_engine['subnet_name'], data_engine['master_size'], data_engine['ssh_key_path'], initial_user,
data_engine['dataengine_service_account_name'], data_engine['primary_image_name'],
@@ -180,9 +199,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create instance.", str(err))
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
+ dlab.fab.append_result("Failed to create instance.", str(err))
+ GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
sys.exit(1)
try:
@@ -190,10 +208,11 @@
logging.info('[CREATE SLAVE NODE {}]'.format(i + 1))
print('[CREATE SLAVE NODE {}]'.format(i + 1))
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
- params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5} " \
- "--ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9} " \
- "--secondary_image_name {10} --instance_class {11} --primary_disk_size {12} --secondary_disk_size {13} " \
- "--gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} --labels '{17}' --service_base_name {18}". \
+ params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} " \
+ "--instance_size {5} --ssh_key_path {6} --initial_user {7} --service_account_name {8} " \
+ "--image_name {9} --secondary_image_name {10} --instance_class {11} --primary_disk_size {12} " \
+ "--secondary_disk_size {13} --gpu_accelerator_type {14} --network_tag {15} --cluster_name {16} " \
+ "--labels '{17}' --service_base_name {18}". \
format(slave_name, data_engine['region'], data_engine['zone'],
data_engine['vpc_name'], data_engine['subnet_name'], data_engine['slave_size'],
data_engine['ssh_key_path'], initial_user, data_engine['dataengine_service_account_name'],
@@ -208,13 +227,12 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i+1)
try:
- GCPActions().remove_instance(slave_name, data_engine['zone'])
+ GCPActions.remove_instance(slave_name, data_engine['zone'])
except:
print("The slave instance {} hasn't been created.".format(slave_name))
- GCPActions().remove_instance(data_engine['master_node_name'], data_engine['zone'])
- append_result("Failed to create slave instances.", str(err))
+ GCPActions.remove_instance(data_engine['master_node_name'], data_engine['zone'])
+ dlab.fab.append_result("Failed to create slave instances.", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
index 0e40ed9..ce5af48 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_start.py
@@ -24,22 +24,24 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
+from fabric.api import *
def start_data_engine(zone, cluster_name):
print("Starting data engine cluster")
try:
- instances = GCPMeta().get_list_instances(zone, cluster_name)
+ instances = GCPMeta.get_list_instances(zone, cluster_name)
if 'items' in instances:
for i in instances['items']:
- GCPActions().start_instance(i['name'], zone)
+ GCPActions.start_instance(i['name'], zone)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to start dataengine", str(err))
sys.exit(1)
@@ -51,23 +53,27 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['zone'] = os.environ['gcp_zone']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['cluster_name'] = \
- data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
- data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+ data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
try:
logging.info('[STARTING DATA ENGINE]')
print('[STARTING DATA ENGINE]')
@@ -75,7 +81,7 @@
start_data_engine(data_engine['zone'], data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to start Data Engine.", str(err))
+ dlab.fab.append_result("Failed to start Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -84,9 +90,9 @@
logging.info('[UPDATE LAST ACTIVITY TIME]')
print('[UPDATE LAST ACTIVITY TIME]')
data_engine['computational_id'] = data_engine['cluster_name'] + '-m'
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
- data_engine['notebook_ip'] = GCPMeta().get_private_ip_address(os.environ['notebook_instance_name'])
- data_engine['computational_ip'] = GCPMeta().get_private_ip_address(data_engine['computational_id'])
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
+ data_engine['notebook_ip'] = GCPMeta.get_private_ip_address(os.environ['notebook_instance_name'])
+ data_engine['computational_ip'] = GCPMeta.get_private_ip_address(data_engine['computational_id'])
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
params = '--os_user {0} --notebook_ip {1} --keyfile "{2}" --cluster_ip {3}' \
.format(os.environ['conf_os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
@@ -95,18 +101,17 @@
local("~/scripts/{}.py {}".format('update_inactivity_on_start', params))
except Exception as err:
traceback.print_exc()
- append_result("Failed to update last activity time.", str(err))
+ dlab.fab.append_result("Failed to update last activity time.", str(err))
raise Exception
except:
sys.exit(1)
-
try:
with open("/root/result.json", 'w') as result:
res = {"service_base_name": data_engine['service_base_name'],
"Action": "Start Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
index 2396600..e370bfb 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_stop.py
@@ -24,9 +24,10 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
@@ -34,12 +35,12 @@
def stop_data_engine(zone, cluster_name):
print("Stopping data engine cluster")
try:
- instances = GCPMeta().get_list_instances(zone, cluster_name)
+ instances = GCPMeta.get_list_instances(zone, cluster_name)
if 'items' in instances:
for i in instances['items']:
- GCPActions().stop_instance(i['name'], zone)
+ GCPActions.stop_instance(i['name'], zone)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to stop dataengine", str(err))
sys.exit(1)
@@ -51,23 +52,27 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['zone'] = os.environ['gcp_zone']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['cluster_name'] = \
- data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
- data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+ data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
try:
logging.info('[STOPPING DATA ENGINE]')
print('[STOPPING DATA ENGINE]')
@@ -75,7 +80,7 @@
stop_data_engine(data_engine['zone'], data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to stop Data Engine.", str(err))
+ dlab.fab.append_result("Failed to stop Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -85,6 +90,6 @@
"Action": "Stop Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
index f50ffb2..6d9adfd 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/dataengine_terminate.py
@@ -24,9 +24,10 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import traceback
import os
import uuid
@@ -34,19 +35,19 @@
def terminate_data_engine(zone, notebook_name, os_user, key_path, cluster_name):
print("Terminating data engine cluster")
try:
- instances = GCPMeta().get_list_instances(zone, cluster_name)
+ instances = GCPMeta.get_list_instances(zone, cluster_name)
if 'items' in instances:
for i in instances['items']:
- GCPActions().remove_instance(i['name'], zone)
+ GCPActions.remove_instance(i['name'], zone)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataengine", str(err))
sys.exit(1)
print("Removing Data Engine kernels from notebook")
try:
- remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
+ dlab.actions_lib.remove_dataengine_kernels(notebook_name, os_user, key_path, cluster_name)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove dataengine kernels from notebook", str(err))
sys.exit(1)
@@ -58,25 +59,29 @@
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
data_engine = dict()
- try:
- data_engine['exploratory_name'] = os.environ['exploratory_name'].lower().replace('_', '-')
- except:
+ if 'exploratory_name' in os.environ:
+ data_engine['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-').lower()
+ else:
data_engine['exploratory_name'] = ''
- try:
- data_engine['computational_name'] = os.environ['computational_name'].lower().replace('_', '-')
- except:
+ if 'computational_name' in os.environ:
+ data_engine['computational_name'] = os.environ['computational_name'].replace('_', '-').lower()
+ else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['zone'] = os.environ['gcp_zone']
- data_engine['user_name'] = os.environ['edge_user_name'].lower().replace('_', '-')
- data_engine['project_name'] = os.environ['project_name'].lower().replace('_', '-')
- data_engine['cluster_name'] = \
- data_engine['service_base_name'] + '-' + data_engine['project_name'] + '-de-' + \
- data_engine['exploratory_name'] + '-' + data_engine['computational_name']
+ data_engine['user_name'] = os.environ['edge_user_name']
+ data_engine['project_name'] = os.environ['project_name'].replace('_', '-').lower()
+ data_engine['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
+ data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
+ data_engine['project_name'],
+ data_engine['endpoint_name'],
+ data_engine['computational_name'])
data_engine['notebook_name'] = os.environ['notebook_instance_name']
- data_engine['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
+ data_engine['key_path'] = '{}/{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
@@ -87,7 +92,7 @@
data_engine['key_path'], data_engine['cluster_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate Data Engine.", str(err))
+ dlab.fab.append_result("Failed to terminate Data Engine.", str(err))
raise Exception
except:
sys.exit(1)
@@ -98,6 +103,6 @@
"Action": "Terminate Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
index 4f18e12..238f9b4 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/deeplearning_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -37,67 +39,76 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -105,9 +116,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -124,9 +134,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -134,16 +143,16 @@
logging.info('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO DEEPLEARNING NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}". \
- format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'], os.environ['gcp_region'])
+ format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'],
+ os.environ['gcp_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -165,27 +174,26 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure Deep Learning node.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure Deep Learning node.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
- additional_config = {"user_keyname": os.environ['project_name'],
+ additional_config = {"user_keyname": notebook_config['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -197,35 +205,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+ print("Looks like another image creating operation for your template have been started a "
+ "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -250,59 +257,64 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- tensorboard_url = "http://" + ip_address + ":6006/"
- jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
- notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("TensorBoard URL: {}".format(tensorboard_url))
- print("TensorBoard log dir: /var/log/tensorboard")
- print("Jupyter URL: {}".format(jupyter_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ tensorboard_url = "http://" + ip_address + ":6006/"
+ jupyter_ip_url = 'http://' + ip_address + ':8888/{}/'.format(notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(notebook_config['project_name']))
+ print("TensorBoard URL: {}".format(tensorboard_url))
+ print("TensorBoard log dir: /var/log/tensorboard")
+ print("Jupyter URL: {}".format(jupyter_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "tensorboard_log_dir": "/var/log/tensorboard",
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensorboard_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "tensorboard_log_dir": "/var/log/tensorboard",
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensorboard_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
index 39d00fe..110efb9 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_configure.py
@@ -22,10 +22,17 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
+import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,89 +42,124 @@
level=logging.DEBUG,
filename=local_log_filepath)
- print('Generating infrastructure names and tags')
- edge_conf = dict()
- edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- edge_conf['key_name'] = os.environ['conf_key_name']
- edge_conf['user_keyname'] = os.environ['project_name']
- edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ def clear_resources():
+ GCPActions.remove_instance(edge_conf['instance_name'], edge_conf['zone'])
+ GCPActions.remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
+ GCPActions.remove_bucket(edge_conf['bucket_name'])
+ GCPActions.remove_firewall(edge_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(edge_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(edge_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(edge_conf['fw_edge_egress_internal'])
+ GCPActions.remove_firewall(edge_conf['fw_ps_ingress'])
+ GCPActions.remove_firewall(edge_conf['fw_ps_egress_private'])
+ GCPActions.remove_firewall(edge_conf['fw_ps_egress_public'])
+ GCPActions.remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
+ GCPActions.remove_role(edge_conf['ps_role_name'])
+ GCPActions.remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
+ GCPActions.remove_role(edge_conf['edge_role_name'])
+ GCPActions.remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+
try:
- if os.environ['gcp_vpc_name'] == '':
- raise KeyError
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ edge_conf = dict()
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ edge_conf['key_name'] = os.environ['conf_key_name']
+ edge_conf['user_keyname'] = edge_conf['project_name']
+ try:
+ if os.environ['gcp_vpc_name'] == '':
+ raise KeyError
+ else:
+ edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
+ except KeyError:
+ edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-vpc'
+ edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+ edge_conf['subnet_name'] = '{0}-{1}-{2}-subnet'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['region'] = os.environ['gcp_region']
+ edge_conf['zone'] = os.environ['gcp_zone']
+ edge_conf['vpc_selflink'] = GCPMeta.get_vpc(edge_conf['vpc_name'])['selfLink']
+ edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+ edge_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['edge_unique_index'] = GCPMeta.get_index_by_service_account_name(
+ edge_conf['edge_service_account_name'])
+ edge_conf['edge_role_name'] = '{}-{}-{}-edge-role'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['edge_unique_index'])
+ edge_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['ps_unique_index'] = GCPMeta.get_index_by_service_account_name(edge_conf['ps_service_account_name'])
+ edge_conf['ps_role_name'] = '{}-{}-{}-ps-role'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'], edge_conf['ps_unique_index'])
+ edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'], edge_conf['endpoint_name'])
+ edge_conf['firewall_name'] = edge_conf['instance_name'] + '{}-sg'.format(edge_conf['instance_name'])
+ edge_conf['notebook_firewall_name'] = '{0}-{1}-{2}-nb-sg'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+ edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
+ edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
+ edge_conf['private_subnet_cidr'] = GCPMeta.get_subnet(edge_conf['subnet_name'],
+ edge_conf['region'])['ipCidrRange']
+ edge_conf['static_ip'] = \
+ GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+ edge_conf['private_ip'] = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
+ edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
+ edge_conf['fw_common_name'] = '{}-{}-{}-ps-sg'.format(edge_conf['service_base_name'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
+ edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
+ edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
+ edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
+ edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
+ edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
+ edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
+ edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
+
+ if os.environ['conf_stepcerts_enabled'] == 'true':
+ edge_conf['step_cert_sans'] = ' --san {0} --san {1} --san {2}'.format(edge_conf['static_ip'],
+ edge_conf['instance_hostname'],
+ edge_conf['private_ip'])
else:
- edge_conf['vpc_name'] = os.environ['gcp_vpc_name']
- except KeyError:
- edge_conf['vpc_name'] = edge_conf['service_base_name'] + '-ssn-vpc'
- edge_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
- edge_conf['subnet_name'] = '{0}-{1}-subnet'.format(edge_conf['service_base_name'], edge_conf['project_name'])
- edge_conf['region'] = os.environ['gcp_region']
- edge_conf['zone'] = os.environ['gcp_zone']
- edge_conf['vpc_selflink'] = GCPMeta().get_vpc(edge_conf['vpc_name'])['selfLink']
- edge_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
- edge_conf['edge_service_account_name'] = '{}-{}-edge'.format(edge_conf['service_base_name'],
- edge_conf['project_name'])
- edge_conf['edge_unique_index'] = GCPMeta().get_index_by_service_account_name(edge_conf['edge_service_account_name'])
- edge_conf['edge_role_name'] = '{}-{}-{}-edge'.format(edge_conf['service_base_name'],
- edge_conf['project_name'], edge_conf['edge_unique_index'])
- edge_conf['ps_service_account_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'],
- edge_conf['project_name'])
- edge_conf['ps_unique_index'] = GCPMeta().get_index_by_service_account_name(edge_conf['ps_service_account_name'])
- edge_conf['ps_role_name'] = '{}-{}-{}-ps'.format(edge_conf['service_base_name'],
- edge_conf['project_name'], edge_conf['ps_unique_index'])
- edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
- edge_conf['project_name'], edge_conf['endpoint_name'])
- edge_conf['firewall_name'] = edge_conf['instance_name'] + '{}-firewall'.format(edge_conf['instance_name'])
- edge_conf['notebook_firewall_name'] = '{0}-{1}-{2}-nb-firewall'.format(edge_conf['service_base_name'],
- edge_conf['project_name'], os.environ['endpoint_name'])
- edge_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(edge_conf['service_base_name'],
- edge_conf['project_name'],
- edge_conf['endpoint_name'])
- edge_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(edge_conf['service_base_name'],
- edge_conf['endpoint_name'])
- edge_conf['instance_size'] = os.environ['gcp_edge_instance_size']
- edge_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
- instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
- edge_conf['dlab_ssh_user'] = os.environ['conf_os_user']
- edge_conf['private_subnet_cidr'] = GCPMeta().get_subnet(edge_conf['subnet_name'],
- edge_conf['region'])['ipCidrRange']
- edge_conf['static_ip'] = \
- GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
- edge_conf['private_ip'] = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
- edge_conf['vpc_cidrs'] = [edge_conf['vpc_cidr']]
- edge_conf['fw_common_name'] = '{}-{}-ps'.format(edge_conf['service_base_name'], edge_conf['project_name'])
- edge_conf['fw_ps_ingress'] = '{}-ingress'.format(edge_conf['fw_common_name'])
- edge_conf['fw_ps_egress_private'] = '{}-egress-private'.format(edge_conf['fw_common_name'])
- edge_conf['fw_ps_egress_public'] = '{}-egress-public'.format(edge_conf['fw_common_name'])
- edge_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(edge_conf['instance_name'])
- edge_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(edge_conf['instance_name'])
- edge_conf['fw_edge_egress_public'] = '{}-egress-public'.format(edge_conf['instance_name'])
- edge_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(edge_conf['instance_name'])
+ edge_conf['step_cert_sans'] = ''
- if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} --san {1} --san {2}'.format(edge_conf['static_ip'], instance_hostname,
- edge_conf['private_ip'])
- else:
- step_cert_sans = ''
-
- edge_conf['allowed_ip_cidr'] = list()
- for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
- edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+ edge_conf['allowed_ip_cidr'] = list()
+ for cidr in os.environ['conf_allowed_ip_cidr'].split(','):
+ edge_conf['allowed_ip_cidr'].append(cidr.replace(' ', ''))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+ clear_resources()
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ edge_conf['initial_user'] = 'ubuntu'
+ edge_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ edge_conf['initial_user'] = 'ec2-user'
+ edge_conf['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, "/root/keys/" + os.environ['conf_key_name'] + ".pem", initial_user,
- edge_conf['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ edge_conf['instance_hostname'], "/root/keys/" + os.environ['conf_key_name'] + ".pem",
+ edge_conf['initial_user'], edge_conf['dlab_ssh_user'], edge_conf['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -125,53 +167,24 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
- GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
- GCPActions().remove_bucket(edge_conf['bucket_name'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['ps_role_name'])
- GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['edge_role_name'])
- GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING PREREQUISITES]')
logging.info('[INSTALLING PREREQUISITES]')
- params = "--hostname {} --keyfile {} --user {} --region {}".\
- format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'], os.environ['gcp_region'])
+ params = "--hostname {} --keyfile {} --user {} --region {}".format(
+ edge_conf['instance_hostname'], edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'],
+ os.environ['gcp_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
- GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
- GCPActions().remove_bucket(edge_conf['bucket_name'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['ps_role_name'])
- GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['edge_role_name'])
- GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -179,7 +192,7 @@
logging.info('[INSTALLING HTTP PROXY]')
additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'],
"template_file": "/root/templates/squid.conf",
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"ldap_host": os.environ['ldap_hostname'],
"ldap_dn": os.environ['ldap_dn'],
"ldap_user": os.environ['ldap_service_username'],
@@ -187,7 +200,7 @@
"vpc_cidrs": edge_conf['vpc_cidrs'],
"allowed_ip_cidr": edge_conf['allowed_ip_cidr']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \
- .format(instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config),
+ .format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('configure_http_proxy', params))
@@ -195,23 +208,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing http proxy.", str(err))
- GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
- GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
- GCPActions().remove_bucket(edge_conf['bucket_name'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['ps_role_name'])
- GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['edge_role_name'])
- GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+ dlab.fab.append_result("Failed installing http proxy.", str(err))
+ clear_resources()
sys.exit(1)
@@ -221,41 +219,29 @@
additional_config = {"user_keyname": edge_conf['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, edge_conf['ssh_key_path'], json.dumps(additional_config), edge_conf['dlab_ssh_user'])
+ edge_conf['instance_hostname'], edge_conf['ssh_key_path'], json.dumps(additional_config),
+ edge_conf['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key. Excpeption: " + str(err))
- GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
- GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
- GCPActions().remove_bucket(edge_conf['bucket_name'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['ps_role_name'])
- GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['edge_role_name'])
- GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+        dlab.fab.append_result("Failed installing users key. Exception: " + str(err))
+ clear_resources()
sys.exit(1)
try:
print('[INSTALLING NGINX REVERSE PROXY]')
logging.info('[INSTALLING NGINX REVERSE PROXY]')
- keycloak_client_secret = str(uuid.uuid4())
+ edge_conf['keycloak_client_secret'] = str(uuid.uuid4())
params = "--hostname {} --keyfile {} --user {} --keycloak_client_id {} --keycloak_client_secret {} " \
- "--step_cert_sans '{}'" \
- .format(instance_hostname, edge_conf['ssh_key_path'], edge_conf['dlab_ssh_user'],
- edge_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'], keycloak_client_secret,
- step_cert_sans)
+ "--step_cert_sans '{}'".format(edge_conf['instance_hostname'], edge_conf['ssh_key_path'],
+ edge_conf['dlab_ssh_user'], '{}-{}-{}'.format(
+ edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name']),
+ edge_conf['keycloak_client_secret'], edge_conf['step_cert_sans'])
try:
local("~/scripts/{}.py {}".format('configure_nginx_reverse_proxy', params))
@@ -268,37 +254,23 @@
.format(edge_conf['service_base_name'], os.environ['keycloak_auth_server_url'],
os.environ['keycloak_realm_name'], os.environ['keycloak_user'],
os.environ['keycloak_user_password'],
- keycloak_client_secret, instance_hostname, os.environ['project_name'], os.environ['endpoint_name'])
+ edge_conf['keycloak_client_secret'], edge_conf['instance_hostname'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
try:
local("~/scripts/{}.py {}".format('configure_keycloak', keycloak_params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
- GCPActions().remove_instance(edge_conf['instance_name'], edge_conf['zone'])
- GCPActions().remove_static_address(edge_conf['static_address_name'], edge_conf['region'])
- GCPActions().remove_bucket(edge_conf['bucket_name'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(edge_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(edge_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(edge_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(edge_conf['ps_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['ps_role_name'])
- GCPActions().remove_service_account(edge_conf['edge_service_account_name'], edge_conf['service_base_name'])
- GCPActions().remove_role(edge_conf['edge_role_name'])
- GCPActions().remove_subnet(edge_conf['subnet_name'], edge_conf['region'])
+ dlab.fab.append_result("Failed installing nginx reverse proxy. Excpeption: " + str(err))
+ clear_resources()
sys.exit(1)
try:
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(edge_conf['instance_name']))
- print("Hostname: {}".format(instance_hostname))
+ print("Hostname: {}".format(edge_conf['instance_hostname']))
print("Public IP: {}".format(edge_conf['static_ip']))
print("Private IP: {}".format(edge_conf['private_ip']))
print("Key name: {}".format(edge_conf['key_name']))
@@ -306,7 +278,7 @@
print("Shared bucket name: {}".format(edge_conf['shared_bucket_name']))
print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr']))
with open("/root/result.json", 'w') as result:
- res = {"hostname": instance_hostname,
+ res = {"hostname": edge_conf['instance_hostname'],
"public_ip": edge_conf['static_ip'],
"ip": edge_conf['private_ip'],
"instance_id": edge_conf['instance_name'],
@@ -317,11 +289,12 @@
"socks_port": "1080",
"notebook_subnet": edge_conf['private_subnet_cidr'],
"full_edge_conf": edge_conf,
- "project_name": os.environ['project_name'],
+ "project_name": edge_conf['project_name'],
"@class": "com.epam.dlab.dto.gcp.edge.EdgeInfoGcp",
"Action": "Create new EDGE server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results.", str(err))
+ clear_resources()
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
index 06085dd..52da1be 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_start.py
@@ -21,9 +21,13 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
+import os
import sys
+import json
if __name__ == "__main__":
@@ -35,31 +39,34 @@
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
edge_conf = dict()
- edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- edge_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ edge_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
edge_conf['region'] = os.environ['gcp_region']
edge_conf['zone'] = os.environ['gcp_zone']
- edge_conf['static_address_name'] = '{0}-{1}-ip'.format(edge_conf['service_base_name'], edge_conf['project_name'])
+ edge_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(edge_conf['service_base_name'],
+ edge_conf['project_name'],
+ edge_conf['endpoint_name'])
logging.info('[START EDGE]')
print('[START EDGE]')
try:
- GCPActions().start_instance(edge_conf['instance_name'], edge_conf['zone'])
+ GCPActions.start_instance(edge_conf['instance_name'], edge_conf['zone'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start edge.", str(err))
+ dlab.fab.append_result("Failed to start edge.", str(err))
sys.exit(1)
try:
- instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_conf['instance_name'])
+ instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_conf['instance_name'])
public_ip_address = \
- GCPMeta().get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
- ip_address = GCPMeta().get_private_ip_address(edge_conf['instance_name'])
+ GCPMeta.get_static_address(edge_conf['region'], edge_conf['static_address_name'])['address']
+ ip_address = GCPMeta.get_private_ip_address(edge_conf['instance_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(edge_conf['instance_name']))
@@ -74,7 +81,7 @@
"Action": "Start up notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
index 5342d8a..ee15222 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_stop.py
@@ -21,9 +21,13 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import os
+import logging
import sys
+import json
if __name__ == "__main__":
@@ -35,21 +39,22 @@
filename=local_log_filepath)
print('Generating infrastructure names and tags')
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
edge_conf = dict()
- edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
edge_conf['zone'] = os.environ['gcp_zone']
- edge_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- edge_conf['endpoint_name'] = os.environ['endpoint_name'].lower().replace('_', '-')
+ edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
edge_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(edge_conf['service_base_name'],
edge_conf['project_name'], edge_conf['endpoint_name'])
logging.info('[STOP EDGE]')
print('[STOP EDGE]')
try:
- GCPActions().stop_instance(edge_conf['instance_name'], edge_conf['zone'])
+ GCPActions.stop_instance(edge_conf['instance_name'], edge_conf['zone'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to stop edge.", str(err))
+ dlab.fab.append_result("Failed to stop edge.", str(err))
sys.exit(1)
try:
@@ -58,7 +63,6 @@
"Action": "Stop edge server"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
-
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
index 6385ffd..8080988 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/edge_terminate.py
@@ -22,118 +22,127 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import traceback
+import logging
-def terminate_edge_node(user_name, service_base_name, region, zone):
+def terminate_edge_node(user_name, service_base_name, region, zone, project_name, endpoint_name):
print("Terminating Dataengine-service clusters")
try:
labels = [
{'sbn': service_base_name},
{'user': user_name}
]
- clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+ clusters_list = GCPMeta.get_dataproc_list(labels)
if clusters_list:
for cluster_name in clusters_list:
- actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+ GCPActions.delete_dataproc_cluster(cluster_name, region)
print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
else:
print("There are no Dataproc clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataproc", str(err))
sys.exit(1)
print("Terminating EDGE and notebook instances")
- base = '{}-{}'.format(service_base_name, user_name)
- keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+ base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
+ keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
targets = ['{}-{}'.format(base, k) for k in keys]
try:
- instances = GCPMeta().get_list_instances(zone, base)
+ instances = GCPMeta.get_list_instances(zone, base)
if 'items' in instances:
for i in instances['items']:
if 'user' in i['labels'] and user_name == i['labels']['user']:
- GCPActions().remove_instance(i['name'], zone)
+ GCPActions.remove_instance(i['name'], zone)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate instances", str(err))
sys.exit(1)
print("Removing static addresses")
try:
- static_addresses = GCPMeta().get_list_static_addresses(region, base)
+ static_addresses = GCPMeta.get_list_static_addresses(region, base)
if 'items' in static_addresses:
for i in static_addresses['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_static_address(i['name'], region)
+ GCPActions.remove_static_address(i['name'], region)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove static IPs", str(err))
sys.exit(1)
print("Removing storage bucket")
try:
- buckets = GCPMeta().get_list_buckets(base)
+ buckets = GCPMeta.get_list_buckets(base)
if 'items' in buckets:
for i in buckets['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_bucket(i['name'])
+ GCPActions.remove_bucket(i['name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove buckets", str(err))
sys.exit(1)
print("Removing firewalls")
try:
- firewalls = GCPMeta().get_list_firewalls(base)
+ firewalls = GCPMeta.get_list_firewalls(base)
if 'items' in firewalls:
for i in firewalls['items']:
if bool(set(targets) & set(i['targetTags'])):
- GCPActions().remove_firewall(i['name'])
+ GCPActions.remove_firewall(i['name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove security groups", str(err))
sys.exit(1)
print("Removing Service accounts and roles")
try:
- list_service_accounts = GCPMeta().get_list_service_accounts()
+ list_service_accounts = GCPMeta.get_list_service_accounts()
for service_account in (set(targets) & set(list_service_accounts)):
if service_account.startswith(service_base_name):
- GCPActions().remove_service_account(service_account, service_base_name)
- list_roles_names = GCPMeta().get_list_roles()
+ GCPActions.remove_service_account(service_account, service_base_name)
+ list_roles_names = GCPMeta.get_list_roles()
for role in (set(targets) & set(list_roles_names)):
if role.startswith(service_base_name):
- GCPActions().remove_role(role)
+ GCPActions.remove_role(role)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
sys.exit(1)
print("Removing subnets")
try:
- list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+ list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
if 'items' in list_subnets:
vpc_selflink = list_subnets['items'][0]['network']
vpc_name = vpc_selflink.split('/')[-1]
- subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+ subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
for i in subnets['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_subnet(i['name'], region)
+ GCPActions.remove_subnet(i['name'], region)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove subnets", str(err))
sys.exit(1)
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/edge/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
edge_conf = dict()
- edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- edge_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
+ edge_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ edge_conf['edge_user_name'] = (os.environ['edge_user_name'])
+ edge_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ edge_conf['endpoint_name'] = os.environ['endpoint_name'].replace('_', '-').lower()
edge_conf['region'] = os.environ['gcp_region']
edge_conf['zone'] = os.environ['gcp_zone']
@@ -142,12 +151,13 @@
print('[TERMINATE EDGE]')
try:
terminate_edge_node(edge_conf['edge_user_name'], edge_conf['service_base_name'],
- edge_conf['region'], edge_conf['zone'])
+ edge_conf['region'], edge_conf['zone'], edge_conf['project_name'],
+ edge_conf['endpoint_name'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate edge.", str(err))
- except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate edge.", str(err))
+ raise Exception
+ except:
sys.exit(1)
try:
@@ -157,6 +167,6 @@
"Action": "Terminate edge node"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
index 79794b7..863be19 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyter_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -37,63 +39,76 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -101,9 +116,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -120,9 +134,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -138,9 +151,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -164,9 +176,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyter.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure jupyter.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -175,16 +186,16 @@
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -196,35 +207,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+ print("Looks like another image creating operation for your template have been started a "
+ "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -249,51 +259,55 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("Jupyter URL: {}".format(jupyter_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
- print("ReverseProxyUngit".format(jupyter_ungit_access_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("Jupyter URL: {}".format(jupyter_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print("ReverseProxyNotebook".format(jupyter_notebook_access_url))
+ print("ReverseProxyUngit".format(jupyter_ungit_access_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
index b41b024..ee95260 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/jupyterlab_configure.py
@@ -1,26 +1,5 @@
#!/usr/bin/python
-# *****************************************************************************
-# #
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-# #
-# http://www.apache.org/licenses/LICENSE-2.0
-# #
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# #
-# ******************************************************************************
-
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -45,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -58,65 +39,76 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -124,9 +116,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -143,9 +134,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -161,9 +151,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring jupiter and all dependencies
@@ -186,9 +175,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure jupyter.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure jupyter.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -197,16 +185,16 @@
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -218,35 +206,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+                    print("Looks like another image creation operation for your template has been started a "
+                          "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -271,12 +258,11 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -294,9 +280,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy for docker.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
@@ -312,51 +297,55 @@
try:
local("~/scripts/jupyterlab_container_start.py {}".format(params))
except:
- traceback.print_exc()
- raise Exception
+ traceback.print_exc()
+ raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start Jupyter container.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to start Jupyter container.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("JupyterLab URL: {}".format(jupyter_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
- print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ jupyter_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("JupyterLab URL: {}".format(jupyter_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+            print("ReverseProxyNotebook: {}".format(jupyter_notebook_acces_url))
+            print("ReverseProxyUngit: {}".format(jupyter_ungit_acces_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "JupyterLab",
- "url": jupyter_notebook_acces_url},
- {"description": "Ungit",
- "url": jupyter_ungit_acces_url},
- #{"description": "JupyterLab (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "JupyterLab",
+ "url": jupyter_notebook_acces_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_acces_url},
+ #{"description": "JupyterLab (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
index 82edc16..47b6cde 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_prepare.py
@@ -22,11 +22,17 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import sys
+import time
+import os
import traceback
+import logging
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import uuid
+from fabric.api import *
+
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
@@ -35,92 +41,111 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- print('Generating infrastructure names and tags')
- project_conf = dict()
- project_conf['edge_unique_index'] = str(uuid.uuid4())[:5]
- project_conf['ps_unique_index'] = str(uuid.uuid4())[:5]
- project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- project_conf['key_name'] = os.environ['conf_key_name']
- project_conf['user_keyname'] = os.environ['project_name']
- project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- project_conf['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
try:
- if os.environ['gcp_vpc_name'] == '':
- raise KeyError
- else:
- project_conf['vpc_name'] = os.environ['gcp_vpc_name']
- except KeyError:
- project_conf['vpc_name'] = project_conf['service_base_name'] + '-ssn-vpc'
- project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
- project_conf['private_subnet_name'] = '{0}-{1}-subnet'.format(project_conf['service_base_name'],
- project_conf['project_name'])
- project_conf['subnet_name'] = os.environ['gcp_subnet_name']
- project_conf['region'] = os.environ['gcp_region']
- project_conf['zone'] = os.environ['gcp_zone']
- project_conf['vpc_selflink'] = GCPMeta().get_vpc(project_conf['vpc_name'])['selfLink']
- project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
- project_conf['edge_service_account_name'] = '{}-{}-edge'.format(project_conf['service_base_name'],
- project_conf['project_name'])
- project_conf['edge_role_name'] = '{}-{}-{}-edge'.format(project_conf['service_base_name'],
- project_conf['project_name'], project_conf['edge_unique_index'])
- project_conf['ps_service_account_name'] = '{}-{}-ps'.format(project_conf['service_base_name'],
- project_conf['project_name'])
- project_conf['ps_role_name'] = '{}-{}-{}-ps'.format(project_conf['service_base_name'],
- project_conf['project_name'], project_conf['ps_unique_index'])
- project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
- project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
- project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
- project_conf['project_name'], project_conf['endpoint_tag'])
- project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
- project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ print('Generating infrastructure names and tags')
+ project_conf = dict()
+ project_conf['edge_unique_index'] = str(uuid.uuid4())[:5]
+ project_conf['ps_unique_index'] = str(uuid.uuid4())[:5]
+ project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ project_conf['key_name'] = os.environ['conf_key_name']
+ project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ project_conf['user_keyname'] = project_conf['project_name']
+ project_conf['project_tag'] = (project_conf['project_name'])
+ project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ project_conf['endpoint_tag'] = project_conf['endpoint_name']
+ try:
+ if os.environ['gcp_vpc_name'] == '':
+ raise KeyError
+ else:
+ project_conf['vpc_name'] = os.environ['gcp_vpc_name']
+ except KeyError:
+ project_conf['vpc_name'] = project_conf['service_base_name'] + '-vpc'
+ project_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+ project_conf['private_subnet_name'] = '{0}-{1}-{2}-subnet'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['subnet_name'] = os.environ['gcp_subnet_name']
+ project_conf['region'] = os.environ['gcp_region']
+ project_conf['zone'] = os.environ['gcp_zone']
+ project_conf['vpc_selflink'] = GCPMeta.get_vpc(project_conf['vpc_name'])['selfLink']
+ project_conf['private_subnet_prefix'] = os.environ['conf_private_subnet_prefix']
+ project_conf['edge_service_account_name'] = '{}-{}-{}-edge-sa'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['edge_role_name'] = '{}-{}-{}-{}-edge-role'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'],
+ project_conf['edge_unique_index'])
+ project_conf['ps_service_account_name'] = '{}-{}-{}-ps-sa'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['ps_role_name'] = '{}-{}-{}-{}-ps-role'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'],
+ project_conf['ps_unique_index'])
+ project_conf['ps_policy_path'] = '/root/files/ps_policy.json'
+ project_conf['ps_roles_path'] = '/root/files/ps_roles.json'
+ project_conf['instance_name'] = '{0}-{1}-{2}-edge'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['ssn_instance_name'] = '{}-ssn'.format(project_conf['service_base_name'])
+ project_conf['bucket_name'] = '{0}-{1}-{2}-bucket'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
+ project_conf['endpoint_name'])
+ project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
+ project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+ project_conf['static_address_name'] = '{0}-{1}-{2}-static-ip'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['fw_edge_ingress_public'] = '{}-sg-ingress-public'.format(project_conf['instance_name'])
+ project_conf['fw_edge_ingress_internal'] = '{}-sg-ingress-internal'.format(project_conf['instance_name'])
+ project_conf['fw_edge_egress_public'] = '{}-sg-egress-public'.format(project_conf['instance_name'])
+ project_conf['fw_edge_egress_internal'] = '{}-sg-egress-internal'.format(project_conf['instance_name'])
+ project_conf['ps_firewall_target'] = '{0}-{1}-{2}-ps'.format(project_conf['service_base_name'],
+ project_conf['project_name'],
+ project_conf['endpoint_name'])
+ project_conf['fw_common_name'] = '{}-{}-{}-ps'.format(project_conf['service_base_name'],
project_conf['project_name'],
project_conf['endpoint_name'])
- project_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(project_conf['service_base_name'],
- project_conf['endpoint_name'])
- project_conf['instance_size'] = os.environ['gcp_edge_instance_size']
- project_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- project_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
- project_conf['static_address_name'] = '{0}-{1}-ip'.format(project_conf['service_base_name'], project_conf['project_name'])
- project_conf['fw_edge_ingress_public'] = '{}-ingress-public'.format(project_conf['instance_name'])
- project_conf['fw_edge_ingress_internal'] = '{}-ingress-internal'.format(project_conf['instance_name'])
- project_conf['fw_edge_egress_public'] = '{}-egress-public'.format(project_conf['instance_name'])
- project_conf['fw_edge_egress_internal'] = '{}-egress-internal'.format(project_conf['instance_name'])
- project_conf['ps_firewall_target'] = '{0}-{1}-ps'.format(project_conf['service_base_name'],
- project_conf['project_name'])
- project_conf['fw_common_name'] = '{}-{}-ps'.format(project_conf['service_base_name'], project_conf['project_name'])
- project_conf['fw_ps_ingress'] = '{}-ingress'.format(project_conf['fw_common_name'])
- project_conf['fw_ps_egress_private'] = '{}-egress-private'.format(project_conf['fw_common_name'])
- project_conf['fw_ps_egress_public'] = '{}-egress-public'.format(project_conf['fw_common_name'])
- project_conf['network_tag'] = project_conf['instance_name']
- project_conf['instance_labels'] = {"name": project_conf['instance_name'],
- "sbn": project_conf['service_base_name'],
- "project_tag": project_conf['project_tag'],
- "endpoint_tag": project_conf['endpoint_tag'],
- "product": "dlab"}
- project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
- project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
- if 'conf_user_subnets_range' in os.environ:
- project_conf['user_subnets_range'] = os.environ['conf_user_subnets_range']
- else:
- project_conf['user_subnets_range'] = ''
+ project_conf['fw_ps_ingress'] = '{}-sg-ingress'.format(project_conf['fw_common_name'])
+ project_conf['fw_ps_egress_private'] = '{}-sg-egress-private'.format(project_conf['fw_common_name'])
+ project_conf['fw_ps_egress_public'] = '{}-sg-egress-public'.format(project_conf['fw_common_name'])
+ project_conf['network_tag'] = project_conf['instance_name']
+ project_conf['instance_labels'] = {"name": project_conf['instance_name'],
+ "sbn": project_conf['service_base_name'],
+ "project_tag": project_conf['project_tag'],
+ "endpoint_tag": project_conf['endpoint_tag'],
+ "product": "dlab"}
+ project_conf['tag_name'] = project_conf['service_base_name'] + '-tag'
+ project_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+ if 'conf_user_subnets_range' in os.environ:
+ project_conf['user_subnets_range'] = os.environ['conf_user_subnets_range']
+ else:
+ project_conf['user_subnets_range'] = ''
- # FUSE in case of absence of user's key
- try:
- project_conf['user_key'] = os.environ['key']
+ # FUSE in case of absence of user's key
try:
- local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'], os.environ['project_name']))
- except:
- print("ADMINSs PUBLIC KEY DOES NOT INSTALLED")
- except KeyError:
- print("ADMINSs PUBLIC KEY DOES NOT UPLOADED")
- sys.exit(1)
+ project_conf['user_key'] = os.environ['key']
+ try:
+ local('echo "{0}" >> {1}{2}.pub'.format(project_conf['user_key'], os.environ['conf_key_dir'],
+ project_conf['project_name']))
+ except:
+                print("ADMIN's public key has not been installed")
+ except KeyError:
+            print("ADMIN's public key has not been uploaded")
+ sys.exit(1)
- print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(
- project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
- logging.info(json.dumps(project_conf))
+        print("Will create exploratory environment with edge node as access point as following: {}".format(json.dumps(
+            project_conf, sort_keys=True, indent=4, separators=(',', ': '))))
+ logging.info(json.dumps(project_conf))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate infrastructure names", str(err))
+ sys.exit(1)
try:
logging.info('[CREATE SUBNET]')
@@ -131,18 +156,17 @@
project_conf['user_subnets_range'])
try:
local("~/scripts/{}.py {}".format('common_create_subnet', params))
- project_conf['private_subnet_cidr'] = GCPMeta().get_subnet(project_conf['private_subnet_name'],
- project_conf['region'])['ipCidrRange']
+ project_conf['private_subnet_cidr'] = GCPMeta.get_subnet(project_conf['private_subnet_name'],
+ project_conf['region'])['ipCidrRange']
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
except:
print("Subnet hasn't been created.")
- append_result("Failed to create subnet.", str(err))
+ dlab.fab.append_result("Failed to create subnet.", str(err))
sys.exit(1)
print('NEW SUBNET CIDR CREATED: {}'.format(project_conf['private_subnet_cidr']))
@@ -150,8 +174,9 @@
try:
logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
print('[CREATE SERVICE ACCOUNT AND ROLE FOR EDGE NODE]')
- params = "--service_account_name {} --role_name {} --unique_index {} --service_base_name {}".format(project_conf['edge_service_account_name'],
- project_conf['edge_role_name'], project_conf['edge_unique_index'], project_conf['service_base_name'])
+ params = "--service_account_name {} --role_name {} --unique_index {} --service_base_name {}".format(
+ project_conf['edge_service_account_name'], project_conf['edge_role_name'],
+ project_conf['edge_unique_index'], project_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -159,22 +184,23 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
except:
print("Service account or role hasn't been created")
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
- append_result("Failed to creating service account and role.", str(err))
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+        dlab.fab.append_result("Failed to create service account and role.", str(err))
sys.exit(1)
try:
logging.info('[CREATE SERVICE ACCOUNT AND ROLE FOR PRIVATE SUBNET]')
print('[CREATE SERVICE ACCOUNT AND ROLE FOR NOTEBOOK NODE]')
- params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} --service_base_name {}".format(
- project_conf['ps_service_account_name'], project_conf['ps_role_name'],
- project_conf['ps_policy_path'], project_conf['ps_roles_path'], project_conf['ps_unique_index'], project_conf['service_base_name'])
+ params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+ "--service_base_name {}".format(
+ project_conf['ps_service_account_name'], project_conf['ps_role_name'], project_conf['ps_policy_path'],
+ project_conf['ps_roles_path'], project_conf['ps_unique_index'], project_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_service_account', params))
@@ -182,16 +208,17 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
try:
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
except:
print("Service account or role hasn't been created")
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
- append_result("Failed to creating service account and role.", str(err))
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+            dlab.fab.append_result("Failed to create service account and role.", str(err))
sys.exit(1)
try:
@@ -273,13 +300,13 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- append_result("Failed to create firewall for Edge node.", str(err))
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ dlab.fab.append_result("Failed to create firewall for Edge node.", str(err))
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
sys.exit(1)
try:
@@ -295,8 +322,8 @@
project_conf['ps_firewall_target']
]
ingress_rule['sourceRanges'] = [project_conf['private_subnet_cidr'],
- GCPMeta().get_subnet(project_conf['subnet_name'],
- project_conf['region'])['ipCidrRange']
+ GCPMeta.get_subnet(project_conf['subnet_name'],
+ project_conf['region'])['ipCidrRange']
]
rules = [
{
@@ -314,8 +341,8 @@
project_conf['ps_firewall_target']
]
egress_rule['destinationRanges'] = [project_conf['private_subnet_cidr'],
- GCPMeta().get_subnet(project_conf['subnet_name'],
- project_conf['region'])['ipCidrRange']
+ GCPMeta.get_subnet(project_conf['subnet_name'],
+ project_conf['region'])['ipCidrRange']
]
rules = [
{
@@ -351,17 +378,17 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create firewall for private subnet.", str(err))
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ dlab.fab.append_result("Failed to create firewall for private subnet.", str(err))
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
sys.exit(1)
try:
@@ -372,7 +399,8 @@
"endpoint_tag": project_conf['endpoint_tag'],
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
"sbn": project_conf['service_base_name']}
- params = "--bucket_name {} --tags '{}'".format(project_conf['shared_bucket_name'], json.dumps(project_conf['shared_bucket_tags']))
+ params = "--bucket_name {} --tags '{}'".format(project_conf['shared_bucket_name'],
+ json.dumps(project_conf['shared_bucket_tags']))
try:
local("~/scripts/{}.py {}".format('common_create_bucket', params))
except:
@@ -385,7 +413,8 @@
os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value'],
"sbn": project_conf['service_base_name'],
"project_tag": project_conf['project_tag']}
- params = "--bucket_name {} --tags '{}'".format(project_conf['bucket_name'], json.dumps(project_conf['bucket_tags']))
+ params = "--bucket_name {} --tags '{}'".format(project_conf['bucket_name'],
+ json.dumps(project_conf['bucket_tags']))
try:
local("~/scripts/{}.py {}".format('common_create_bucket', params))
@@ -393,43 +422,45 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create bucket.", str(err))
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ dlab.fab.append_result("Unable to create bucket.", str(err))
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
sys.exit(1)
try:
logging.info('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
print('[SET PERMISSIONS FOR USER AND SHARED BUCKETS]')
- GCPActions().set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.set_bucket_owner(project_conf['bucket_name'], project_conf['ps_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.set_bucket_owner(project_conf['shared_bucket_name'], project_conf['ps_service_account_name'],
+ project_conf['service_base_name'])
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set bucket permissions.", str(err))
- GCPActions().remove_bucket(project_conf['bucket_name'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ dlab.fab.append_result("Failed to set bucket permissions.", str(err))
+ GCPActions.remove_bucket(project_conf['bucket_name'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
sys.exit(1)
try:
@@ -442,64 +473,67 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create static ip.", str(err))
+ dlab.fab.append_result("Failed to create static ip.", str(err))
try:
- GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
+ GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
except:
print("Static IP address hasn't been created.")
- GCPActions().remove_bucket(project_conf['bucket_name'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ GCPActions.remove_bucket(project_conf['bucket_name'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
sys.exit(1)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ project_conf['initial_user'] = 'ubuntu'
+ project_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ project_conf['initial_user'] = 'ec2-user'
+ project_conf['sudo_group'] = 'wheel'
try:
project_conf['static_ip'] = \
- GCPMeta().get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
+ GCPMeta.get_static_address(project_conf['region'], project_conf['static_address_name'])['address']
logging.info('[CREATE EDGE INSTANCE]')
print('[CREATE EDGE INSTANCE]')
- params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} --ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} --static_ip {} --network_tag {} --labels '{}' --service_base_name {}".\
- format(project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
- project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'], initial_user,
- project_conf['edge_service_account_name'], project_conf['image_name'], 'edge', project_conf['static_ip'],
- project_conf['network_tag'], json.dumps(project_conf['instance_labels']), project_conf['service_base_name'])
+ params = "--instance_name {} --region {} --zone {} --vpc_name {} --subnet_name {} --instance_size {} " \
+ "--ssh_key_path {} --initial_user {} --service_account_name {} --image_name {} --instance_class {} " \
+ "--static_ip {} --network_tag {} --labels '{}' --service_base_name {}".format(
+ project_conf['instance_name'], project_conf['region'], project_conf['zone'], project_conf['vpc_name'],
+ project_conf['subnet_name'], project_conf['instance_size'], project_conf['ssh_key_path'],
+ project_conf['initial_user'], project_conf['edge_service_account_name'], project_conf['image_name'],
+ 'edge', project_conf['static_ip'], project_conf['network_tag'],
+ json.dumps(project_conf['instance_labels']), project_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create instance.", str(err))
- GCPActions().remove_static_address(project_conf['static_address_name'], project_conf['region'])
- GCPActions().remove_bucket(project_conf['bucket_name'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_ingress_internal'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_public'])
- GCPActions().remove_firewall(project_conf['fw_edge_egress_internal'])
- GCPActions().remove_firewall(project_conf['fw_ps_ingress'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_private'])
- GCPActions().remove_firewall(project_conf['fw_ps_egress_public'])
- GCPActions().remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['ps_role_name'])
- GCPActions().remove_service_account(project_conf['edge_service_account_name'], project_conf['service_base_name'])
- GCPActions().remove_role(project_conf['edge_role_name'])
- GCPActions().remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Failed to create instance.", str(err))
+ GCPActions.remove_static_address(project_conf['static_address_name'], project_conf['region'])
+ GCPActions.remove_bucket(project_conf['bucket_name'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_ingress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_public'])
+ GCPActions.remove_firewall(project_conf['fw_edge_egress_internal'])
+ GCPActions.remove_firewall(project_conf['fw_ps_ingress'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_private'])
+ GCPActions.remove_firewall(project_conf['fw_ps_egress_public'])
+ GCPActions.remove_service_account(project_conf['ps_service_account_name'], project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['ps_role_name'])
+ GCPActions.remove_service_account(project_conf['edge_service_account_name'],
+ project_conf['service_base_name'])
+ GCPActions.remove_role(project_conf['edge_role_name'])
+ GCPActions.remove_subnet(project_conf['private_subnet_name'], project_conf['region'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
index 57af4ad..96c021d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/project_terminate.py
@@ -22,12 +22,17 @@
# ******************************************************************************
import json
-from dlab.fab import *
-from dlab.meta_lib import *
-import sys, time, os
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import sys
+import time
+import os
+import logging
+import traceback
import requests
+
def terminate_edge_node(endpoint_name, project_name, service_base_name, region, zone):
print("Terminating Dataengine-service clusters")
try:
@@ -35,110 +40,124 @@
{'sbn': service_base_name},
{'project_tag': project_name}
]
- clusters_list = meta_lib.GCPMeta().get_dataproc_list(labels)
+ clusters_list = GCPMeta.get_dataproc_list(labels)
if clusters_list:
for cluster_name in clusters_list:
- actions_lib.GCPActions().delete_dataproc_cluster(cluster_name, region)
+ GCPActions.delete_dataproc_cluster(cluster_name, region)
print('The Dataproc cluster {} has been terminated successfully'.format(cluster_name))
else:
print("There are no Dataproc clusters to terminate.")
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate dataengine-service", str(err))
sys.exit(1)
print("Terminating EDGE and notebook instances")
base = '{}-{}-{}'.format(service_base_name, project_name, endpoint_name)
- keys = ['edge', 'ps', 'ip', 'bucket', 'subnet']
+ keys = ['edge', 'ps', 'static-ip', 'bucket', 'subnet']
targets = ['{}-{}'.format(base, k) for k in keys]
try:
- instances = GCPMeta().get_list_instances(zone, base)
+ instances = GCPMeta.get_list_instances(zone, base)
if 'items' in instances:
for i in instances['items']:
if 'project_tag' in i['labels'] and project_name == i['labels']['project_tag']:
- GCPActions().remove_instance(i['name'], zone)
+ GCPActions.remove_instance(i['name'], zone)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to terminate instances", str(err))
sys.exit(1)
print("Removing static addresses")
try:
- static_addresses = GCPMeta().get_list_static_addresses(region, base)
+ static_addresses = GCPMeta.get_list_static_addresses(region, base)
if 'items' in static_addresses:
for i in static_addresses['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_static_address(i['name'], region)
+ GCPActions.remove_static_address(i['name'], region)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove static addresses", str(err))
sys.exit(1)
print("Removing storage bucket")
try:
- buckets = GCPMeta().get_list_buckets(base)
+ buckets = GCPMeta.get_list_buckets(base)
if 'items' in buckets:
for i in buckets['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_bucket(i['name'])
+ GCPActions.remove_bucket(i['name'])
+ except Exception as err:
+ dlab.fab.append_result("Failed to remove storage buckets", str(err))
+ sys.exit(1)
+
+ print("Removing project specific images")
+ try:
+ project_image_name_beginning = '{}-{}'.format(service_base_name, project_name)
+ images = GCPMeta.get_list_images(project_image_name_beginning)
+ if 'items' in images:
+ for i in images['items']:
+ GCPActions.remove_image(i['name'])
     except Exception as err:
-        print('Error: {0}'.format(err))
+        dlab.fab.append_result("Failed to remove images", str(err))
sys.exit(1)
print("Removing firewalls")
try:
- firewalls = GCPMeta().get_list_firewalls(base)
+ firewalls = GCPMeta.get_list_firewalls(base)
if 'items' in firewalls:
for i in firewalls['items']:
if bool(set(targets) & set(i['targetTags'])):
- GCPActions().remove_firewall(i['name'])
+ GCPActions.remove_firewall(i['name'])
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove security groups", str(err))
sys.exit(1)
print("Removing Service accounts and roles")
try:
- list_service_accounts = GCPMeta().get_list_service_accounts()
- role_targets = ['{}-{}-{}'.format(base, meta_lib.GCPMeta().get_index_by_service_account_name('{}-{}'.format(base, k)), k) for k in keys]
- for service_account in (set(targets) & set(list_service_accounts)):
- if service_account.startswith(service_base_name) and service_account.endswith('-edge'):
- GCPActions().remove_service_account(service_account, service_base_name)
- elif service_account.startswith(service_base_name) and service_account.endswith('-ps'):
- GCPActions().remove_service_account(service_account, service_base_name)
- list_roles_names = GCPMeta().get_list_roles()
+ list_service_accounts = GCPMeta.get_list_service_accounts()
+ sa_keys = ['edge-sa', 'ps-sa']
+ role_keys = ['edge-role', 'ps-role']
+ sa_target = ['{}-{}'.format(base, k) for k in sa_keys]
+ indexes = [GCPMeta.get_index_by_service_account_name('{}-{}'.format(base, k)) for k in sa_keys]
+        role_targets = ['{}-{}-{}'.format(base, i, k) for i, k in zip(indexes, role_keys)]
+ for service_account in (set(sa_target) & set(list_service_accounts)):
+ GCPActions.remove_service_account(service_account, service_base_name)
+ list_roles_names = GCPMeta.get_list_roles()
for role in (set(role_targets) & set(list_roles_names)):
- if role.startswith(service_base_name):
- GCPActions().remove_role(role)
+ GCPActions.remove_role(role)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove service accounts and roles", str(err))
sys.exit(1)
print("Removing subnets")
try:
- list_subnets = GCPMeta().get_list_subnetworks(region, '', base)
+ list_subnets = GCPMeta.get_list_subnetworks(region, '', base)
if 'items' in list_subnets:
vpc_selflink = list_subnets['items'][0]['network']
vpc_name = vpc_selflink.split('/')[-1]
- subnets = GCPMeta().get_list_subnetworks(region, vpc_name, base)
+ subnets = GCPMeta.get_list_subnetworks(region, vpc_name, base)
for i in subnets['items']:
if bool(set(targets) & set([i['name']])):
- GCPActions().remove_subnet(i['name'], region)
+ GCPActions.remove_subnet(i['name'], region)
except Exception as err:
- print('Error: {0}'.format(err))
+ dlab.fab.append_result("Failed to remove subnets", str(err))
sys.exit(1)
if __name__ == "__main__":
- local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id'])
+ local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
+ os.environ['request_id'])
local_log_filepath = "/logs/project/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
# generating variables dictionary
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
print('Generating infrastructure names and tags')
project_conf = dict()
- project_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- project_conf['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- project_conf['endpoint_name'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- project_conf['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
+ project_conf['service_base_name'] = (os.environ['conf_service_base_name'])
+ project_conf['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ project_conf['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ project_conf['project_tag'] = project_conf['project_name']
project_conf['region'] = os.environ['gcp_region']
project_conf['zone'] = os.environ['gcp_zone']
@@ -146,11 +165,12 @@
logging.info('[TERMINATE EDGE]')
print('[TERMINATE EDGE]')
try:
- terminate_edge_node(project_conf['endpoint_name'], project_conf['project_name'], project_conf['service_base_name'],
+ terminate_edge_node(project_conf['endpoint_name'], project_conf['project_name'],
+ project_conf['service_base_name'],
project_conf['region'], project_conf['zone'])
except Exception as err:
traceback.print_exc()
- append_result("Failed to terminate edge.", str(err))
+ dlab.fab.append_result("Failed to terminate edge.", str(err))
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
@@ -158,8 +178,10 @@
try:
print('[KEYCLOAK PROJECT CLIENT DELETE]')
logging.info('[KEYCLOAK PROJECT CLIENT DELETE]')
- keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
- keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+ keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+ os.environ['keycloak_auth_server_url'])
+ keycloak_client_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+ os.environ['keycloak_realm_name'])
keycloak_auth_data = {
"username": os.environ['keycloak_user'],
@@ -169,7 +191,8 @@
}
client_params = {
- "clientId": project_conf['service_base_name'] + '-' + os.environ['project_name'] + '-' + os.environ['endpoint_name'],
+ "clientId": "{}-{}-{}".format(project_conf['service_base_name'], project_conf['project_name'],
+ project_conf['endpoint_name'])
}
keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
@@ -192,11 +215,11 @@
try:
with open("/root/result.json", 'w') as result:
- res = {"service_base_name": edge_conf['service_base_name'],
- "project_name": edge_conf['project_name'],
+ res = {"service_base_name": project_conf['service_base_name'],
+ "project_name": project_conf['project_name'],
"Action": "Terminate project"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
index d5e5b08..6ea7a90 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/rstudio_configure.py
@@ -24,10 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+from fabric.api import *
if __name__ == "__main__":
@@ -38,68 +40,78 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['ip_address'] = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- notebook_config['rstudio_pass'] = id_generator()
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ notebook_config['ip_address'] = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -107,9 +119,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -127,9 +138,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -145,9 +155,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring RStudio and all dependencies
@@ -157,7 +166,7 @@
params = "--hostname {0} --keyfile {1} " \
"--region {2} --rstudio_pass {3} " \
"--rstudio_version {4} --os_user {5} " \
- "--r_mirror {6} --ip_adress {7} --exploratory_name {8} --edge_ip {9}" \
+ "--r_mirror {6} --ip_address {7} --exploratory_name {8} --edge_ip {9}" \
.format(instance_hostname, notebook_config['ssh_key_path'],
os.environ['gcp_region'], notebook_config['rstudio_pass'],
os.environ['notebook_rstudio_version'], notebook_config['dlab_ssh_user'],
@@ -169,9 +178,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure RStudio.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure RStudio.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -180,16 +188,16 @@
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -201,35 +209,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+ print("Looks like another image creating operation for your template have been started a "
+ "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -254,54 +261,58 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ rstudio_ip_url = "http://" + ip_address + ":8787/"
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("Rstudio URL: {}".format(rstudio_ip_url))
+ print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+ print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- rstudio_ip_url = "http://" + ip_address + ":8787/"
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("Rstudio URL: {}".format(rstudio_ip_url))
- print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
- print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "RStudio",
- "url": rstudio_notebook_access_url},
- {"description": "Ungit",
- "url": rstudio_ungit_access_url}#,
- #{"description": "RStudio (via tunnel)",
- # "url": rstudio_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ],
- "exploratory_user": notebook_config['dlab_ssh_user'],
- "exploratory_pass": notebook_config['rstudio_pass']}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "RStudio",
+ "url": rstudio_notebook_access_url},
+ {"description": "Ungit",
+ "url": rstudio_ungit_access_url}#,
+ #{"description": "RStudio (via tunnel)",
+ # "url": rstudio_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ],
+ "exploratory_user": notebook_config['dlab_ssh_user'],
+ "exploratory_pass": notebook_config['rstudio_pass']}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
index 8022bad..a47e53b 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_configure.py
@@ -21,15 +21,16 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
import sys, os
from fabric.api import *
-from dlab.ssn_lib import *
import traceback
import json
import argparse
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import logging
parser = argparse.ArgumentParser()
parser.add_argument('--ssn_unique_index', type=str, default='')
@@ -41,45 +42,58 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
- instance = 'ssn'
+
+ def clear_resources():
+ GCPActions.remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
+ GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+ GCPActions.remove_role(ssn_conf['role_name'])
+ if not ssn_conf['pre_defined_firewall']:
+ GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+ GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+ if not ssn_conf['pre_defined_subnet']:
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
try:
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
logging.info('[DERIVING NAMES]')
print('[DERIVING NAMES]')
- pre_defined_vpc = False
- pre_defined_subnet = False
- pre_defined_firewall = False
- billing_enabled = True
-
ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
+ ssn_conf['pre_defined_vpc'] = False
+ ssn_conf['pre_defined_subnet'] = False
+ ssn_conf['pre_defined_firewall'] = False
+ ssn_conf['billing_enabled'] = True
+
ssn_conf['ssn_unique_index'] = args.ssn_unique_index
- ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
+ ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+ ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
ssn_conf['region'] = os.environ['gcp_region']
ssn_conf['zone'] = os.environ['gcp_zone']
- ssn_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(ssn_conf['service_base_name'])
ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
- ssn_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(ssn_conf['service_base_name'],
- ssn_conf['default_endpoint_name'])
ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
try:
if os.environ['gcp_vpc_name'] == '':
raise KeyError
else:
- pre_defined_vpc = True
+ ssn_conf['pre_defined_vpc'] = True
ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
- ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+ ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
try:
if os.environ['gcp_subnet_name'] == '':
raise KeyError
else:
- pre_defined_subnet = True
+ ssn_conf['pre_defined_subnet'] = True
ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
except KeyError:
- ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
+ ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
try:
if os.environ['gcp_firewall_name'] == '':
raise KeyError
@@ -87,13 +101,11 @@
pre_defined_firewall = True
ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
except KeyError:
- ssn_conf['firewall_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
- ssn_conf['subnet_cidr'] = '10.10.1.0/24'
+ ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
ssn_conf['dlab_ssh_user'] = os.environ['conf_os_user']
ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
- ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-' + ssn_conf['ssn_unique_index'] + '-ssn-role'
try:
if os.environ['aws_account_id'] == '':
@@ -101,46 +113,36 @@
if os.environ['aws_billing_bucket'] == '':
raise KeyError
except KeyError:
- billing_enabled = False
- if not billing_enabled:
+ ssn_conf['billing_enabled'] = False
+ if not ssn_conf['billing_enabled']:
os.environ['aws_account_id'] = 'None'
os.environ['aws_billing_bucket'] = 'None'
os.environ['aws_report_path'] = 'None'
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed deriving names.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Failed deriving names.", str(err))
+ clear_resources()
sys.exit(1)
try:
- instance_hostname = GCPMeta().get_instance_public_ip_by_name(ssn_conf['instance_name'])
+ ssn_conf['instance_hostname'] = GCPMeta.get_instance_public_ip_by_name(ssn_conf['instance_name'])
if os.environ['conf_stepcerts_enabled'] == 'true':
- step_cert_sans = ' --san {0} --san {1}'.format(GCPMeta().get_instance_public_ip_by_name(
- ssn_conf['instance_name']), get_instance_private_ip_address('ssn', ssn_conf['instance_name']))
+ ssn_conf['step_cert_sans'] = ' --san {0} --san {1}'.format(GCPMeta.get_instance_public_ip_by_name(
+ ssn_conf['instance_name']), dlab.meta_lib.get_instance_private_ip_address('ssn',
+ ssn_conf['instance_name']))
else:
- step_cert_sans = ''
+ ssn_conf['step_cert_sans'] = ''
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ ssn_conf['initial_user'] = 'ubuntu'
+ ssn_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ ssn_conf['initial_user'] = 'ec2-user'
+ ssn_conf['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, ssn_conf['ssh_key_path'], initial_user, ssn_conf['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], ssn_conf['initial_user'],
+ ssn_conf['dlab_ssh_user'], ssn_conf['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -148,20 +150,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab-user'.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Failed creating ssh user 'dlab-user'.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -170,7 +160,7 @@
params = "--hostname {} --keyfile {} --pip_packages " \
"'boto3 backoff argparse fabric==1.14.0 awscli pymongo pyyaml " \
"google-api-python-client google-cloud-storage pycrypto' --user {} --region {}". \
- format(instance_hostname, ssn_conf['ssh_key_path'],
+ format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'],
ssn_conf['dlab_ssh_user'], ssn_conf['region'])
try:
@@ -179,20 +169,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing software: pip, packages.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Failed installing software: pip, packages.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -204,9 +182,9 @@
"subnet_id": ssn_conf['subnet_name'], "admin_key": os.environ['conf_key_name']}
params = "--hostname {} --keyfile {} --additional_config '{}' --os_user {} --dlab_path {} " \
"--tag_resource_id {} --step_cert_sans '{}'". \
- format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
+ format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'], ssn_conf['service_base_name'],
- step_cert_sans)
+ ssn_conf['step_cert_sans'])
try:
local("~/scripts/{}.py {}".format('configure_ssn_node', params))
@@ -214,20 +192,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed configuring ssn.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Failed configuring ssn.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -247,32 +213,19 @@
{"name": "deeplearning", "tag": "latest"},
{"name": "dataengine", "tag": "latest"},
{"name": "dataengine-service", "tag": "latest"}]
- params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {}" \
- " --cloud_provider {} --region {} --gcr_creds {} --odahu_image {}". \
- format(instance_hostname, ssn_conf['ssh_key_path'], json.dumps(additional_config),
+ params = "--hostname {} --keyfile {} --additional_config '{}' --os_family {} --os_user {} --dlab_path {} " \
+ "--cloud_provider {} --region {} --gcr_creds {} --odahu_image {}". \
+ format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], json.dumps(additional_config),
os.environ['conf_os_family'], ssn_conf['dlab_ssh_user'], os.environ['ssn_dlab_path'],
os.environ['conf_cloud_provider'], ssn_conf['region'], os.environ['ssn_gcr_creds'], os.environ['odahu_deploy_image'])
-
try:
local("~/scripts/{}.py {}".format('configure_docker', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to configure docker.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Unable to configure docker.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -282,7 +235,7 @@
cloud_params = [
{
'key': 'KEYCLOAK_REDIRECT_URI',
- 'value': "https://{0}/".format(instance_hostname)
+ 'value': "https://{0}/".format(ssn_conf['instance_hostname'])
},
{
'key': 'KEYCLOAK_REALM_NAME',
@@ -409,10 +362,6 @@
'value': ''
},
{
- 'key': 'SHARED_IMAGE_ENABLED',
- 'value': os.environ['conf_shared_image_enabled']
- },
- {
'key': 'CONF_IMAGE_ENABLED',
'value': os.environ['conf_image_enabled']
},
@@ -477,9 +426,9 @@
"--request_id {} --billing_dataset_name {} \
--resource {} --service_base_name {} --cloud_provider {} --default_endpoint_name {} " \
"--cloud_params '{}'". \
- format(instance_hostname, ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'], ssn_conf['dlab_ssh_user'],
- os.environ['conf_os_family'], billing_enabled, os.environ['request_id'],
- os.environ['billing_dataset_name'], os.environ['conf_resource'],
+ format(ssn_conf['instance_hostname'], ssn_conf['ssh_key_path'], os.environ['ssn_dlab_path'],
+ ssn_conf['dlab_ssh_user'], os.environ['conf_os_family'], ssn_conf['billing_enabled'],
+ os.environ['request_id'], os.environ['billing_dataset_name'], os.environ['conf_resource'],
ssn_conf['service_base_name'], os.environ['conf_cloud_provider'], ssn_conf['default_endpoint_name'],
json.dumps(cloud_params))
try:
@@ -488,20 +437,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to configure UI.", str(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+        dlab.fab.append_result("Unable to configure UI.", str(err))
+ clear_resources()
sys.exit(1)
try:
@@ -509,7 +446,7 @@
print('[SUMMARY]')
print("Service base name: {}".format(ssn_conf['service_base_name']))
print("SSN Name: {}".format(ssn_conf['instance_name']))
- print("SSN Hostname: {}".format(instance_hostname))
+ print("SSN Hostname: {}".format(ssn_conf['instance_hostname']))
print("Role name: {}".format(ssn_conf['role_name']))
print("Key name: {}".format(os.environ['conf_key_name']))
print("VPC Name: {}".format(ssn_conf['vpc_name']))
@@ -517,14 +454,13 @@
print("Firewall Names: {}".format(ssn_conf['firewall_name']))
print("SSN instance size: {}".format(ssn_conf['instance_size']))
print("SSN AMI name: {}".format(ssn_conf['image_name']))
- print("SSN bucket name: {}".format(ssn_conf['ssn_bucket_name']))
print("Region: {}".format(ssn_conf['region']))
- jenkins_url = "http://{}/jenkins".format(instance_hostname)
- jenkins_url_https = "https://{}/jenkins".format(instance_hostname)
+ jenkins_url = "http://{}/jenkins".format(ssn_conf['instance_hostname'])
+ jenkins_url_https = "https://{}/jenkins".format(ssn_conf['instance_hostname'])
print("Jenkins URL: {}".format(jenkins_url))
print("Jenkins URL HTTPS: {}".format(jenkins_url_https))
- print("DLab UI HTTP URL: http://{}".format(instance_hostname))
- print("DLab UI HTTPS URL: https://{}".format(instance_hostname))
+ print("DLab UI HTTP URL: http://{}".format(ssn_conf['instance_hostname']))
+ print("DLab UI HTTPS URL: https://{}".format(ssn_conf['instance_hostname']))
try:
with open('jenkins_creds.txt') as f:
print(f.read())
@@ -534,37 +470,23 @@
with open("/root/result.json", 'w') as f:
res = {"service_base_name": ssn_conf['service_base_name'],
"instance_name": ssn_conf['instance_name'],
- "instance_hostname": instance_hostname,
+ "instance_hostname": ssn_conf['instance_hostname'],
"role_name": ssn_conf['role_name'],
- #"role_profile_name": role_profile_name,
- #"policy_name": policy_name,
"master_keyname": os.environ['conf_key_name'],
"vpc_id": ssn_conf['vpc_name'],
"subnet_id": ssn_conf['subnet_name'],
"security_id": ssn_conf['firewall_name'],
"instance_shape": ssn_conf['instance_size'],
- "bucket_name": ssn_conf['ssn_bucket_name'],
- "shared_bucket_name": ssn_conf['shared_bucket_name'],
"region": ssn_conf['region'],
"action": "Create SSN instance"}
f.write(json.dumps(res))
print('Upload response file')
params = "--instance_name {} --local_log_filepath {} --os_user {} --instance_hostname {}".\
- format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'], instance_hostname)
+ format(ssn_conf['instance_name'], local_log_filepath, ssn_conf['dlab_ssh_user'],
+ ssn_conf['instance_hostname'])
local("~/scripts/{}.py {}".format('upload_response_file', params))
except Exception as err:
- print('Error: {0}'.format(err))
- GCPActions().remove_instance(ssn_conf['instance_name'], ssn_conf['zone'])
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ dlab.fab.append_result("Error with writing results.", str(err))
+ clear_resources()
sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
index f776fce..8cf209d 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_prepare.py
@@ -21,14 +21,16 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-from dlab.meta_lib import *
import sys, os
from fabric.api import *
-from dlab.ssn_lib import *
import json
import argparse
+import logging
+import traceback
+import dlab.ssn_lib
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
parser = argparse.ArgumentParser()
parser.add_argument('--ssn_unique_index', type=str, default='')
@@ -36,58 +38,61 @@
if __name__ == "__main__":
local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
- local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
+ local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
- instance = 'ssn'
- pre_defined_vpc = False
- pre_defined_subnet = False
- pre_defined_firewall = False
- logging.info('[DERIVING NAMES]')
- print('[DERIVING NAMES]')
- ssn_conf = dict()
- ssn_conf['ssn_unique_index'] = args.ssn_unique_index
- ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
- ssn_conf['region'] = os.environ['gcp_region']
- ssn_conf['zone'] = os.environ['gcp_zone']
- ssn_conf['ssn_bucket_name'] = '{}-ssn-bucket'.format(ssn_conf['service_base_name'])
- ssn_conf['default_endpoint_name'] = os.environ['default_endpoint_name']
- ssn_conf['shared_bucket_name'] = '{0}-{1}-shared-bucket'.format(ssn_conf['service_base_name'],
- ssn_conf['default_endpoint_name'])
- ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
- ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
- ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
- ssn_conf['subnet_name'] = '{}-ssn-subnet'.format(ssn_conf['service_base_name'])
- ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
- ssn_conf['subnet_prefix'] = '20'
- ssn_conf['firewall_name'] = '{}-ssn-firewall'.format(ssn_conf['service_base_name'])
- ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
- ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
- ssn_conf['role_name'] = ssn_conf['service_base_name'] + '-' + ssn_conf['ssn_unique_index'] + '-ssn-role'
- ssn_conf['static_address_name'] = '{}-ssn-ip'.format(ssn_conf['service_base_name'])
- ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
- ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
- ssn_conf['network_tag'] = ssn_conf['instance_name']
- ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
- "sbn": ssn_conf['service_base_name'],
- os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
- ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+ try:
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ ssn_conf = dict()
+ ssn_conf['instance'] = 'ssn'
+ ssn_conf['pre_defined_vpc'] = False
+ ssn_conf['pre_defined_subnet'] = False
+ ssn_conf['pre_defined_firewall'] = False
+ logging.info('[DERIVING NAMES]')
+ print('[DERIVING NAMES]')
+ ssn_conf['ssn_unique_index'] = args.ssn_unique_index
+ ssn_conf['service_base_name'] = os.environ['conf_service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
+ ssn_conf['region'] = os.environ['gcp_region']
+ ssn_conf['zone'] = os.environ['gcp_zone']
+ ssn_conf['instance_name'] = '{}-ssn'.format(ssn_conf['service_base_name'])
+ ssn_conf['instance_size'] = os.environ['gcp_ssn_instance_size']
+ ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
+ ssn_conf['subnet_name'] = '{}-subnet'.format(ssn_conf['service_base_name'])
+ ssn_conf['vpc_cidr'] = os.environ['conf_vpc_cidr']
+ ssn_conf['subnet_prefix'] = '20'
+ ssn_conf['firewall_name'] = '{}-ssn-sg'.format(ssn_conf['service_base_name'])
+ ssn_conf['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ ssn_conf['service_account_name'] = '{}-ssn-sa'.format(ssn_conf['service_base_name']).replace('_', '-')
+ ssn_conf['image_name'] = os.environ['gcp_{}_image_name'.format(os.environ['conf_os_family'])]
+ ssn_conf['role_name'] = '{}-{}-ssn-role'.format(ssn_conf['service_base_name'], ssn_conf['ssn_unique_index'])
+ ssn_conf['static_address_name'] = '{}-ssn-static-ip'.format(ssn_conf['service_base_name'])
+ ssn_conf['ssn_policy_path'] = '/root/files/ssn_policy.json'
+ ssn_conf['ssn_roles_path'] = '/root/files/ssn_roles.json'
+ ssn_conf['network_tag'] = ssn_conf['instance_name']
+ ssn_conf['instance_labels'] = {"name": ssn_conf['instance_name'],
+ "sbn": ssn_conf['service_base_name'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ ssn_conf['allowed_ip_cidr'] = os.environ['conf_allowed_ip_cidr']
+ except Exception as err:
+        dlab.fab.append_result("Failed to generate variables dictionary.", str(err))
+ sys.exit(1)
- if GCPMeta().get_instance(ssn_conf['instance_name']):
- print("Service base name should be unique and less or equal 12 symbols. Please try again.")
+ if GCPMeta.get_instance(ssn_conf['instance_name']):
+        dlab.fab.append_result("Service base name should be unique and less or equal 20 symbols. "
+                               "Please try again.")
sys.exit(1)
try:
if os.environ['gcp_vpc_name'] == '':
raise KeyError
else:
+ ssn_conf['pre_defined_vpc'] = True
ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
try:
- pre_defined_vpc = True
logging.info('[CREATE VPC]')
print('[CREATE VPC]')
params = "--vpc_name {}".format(ssn_conf['vpc_name'])
@@ -98,24 +103,23 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create VPC. Exception:" + str(err))
- if pre_defined_vpc:
+ dlab.fab.append_result("Failed to create VPC.", str(err))
+ if not ssn_conf['pre_defined_vpc']:
try:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
except:
print("VPC hasn't been created.")
sys.exit(1)
try:
- ssn_conf['vpc_selflink'] = GCPMeta().get_vpc(ssn_conf['vpc_name'])['selfLink']
+ ssn_conf['vpc_selflink'] = GCPMeta.get_vpc(ssn_conf['vpc_name'])['selfLink']
if os.environ['gcp_subnet_name'] == '':
raise KeyError
else:
+ ssn_conf['pre_defined_subnet'] = True
ssn_conf['subnet_name'] = os.environ['gcp_subnet_name']
except KeyError:
try:
- pre_defined_subnet = True
logging.info('[CREATE SUBNET]')
print('[CREATE SUBNET]')
params = "--subnet_name {} --region {} --vpc_selflink {} --prefix {} --vpc_cidr {} --ssn {}".\
@@ -128,14 +132,14 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create Subnet.", str(err))
- if pre_defined_vpc:
+ dlab.fab.append_result("Failed to create Subnet.", str(err))
+ if not ssn_conf['pre_defined_subnet']:
try:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
except:
print("Subnet hasn't been created.")
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
sys.exit(1)
@@ -143,10 +147,10 @@
if os.environ['gcp_firewall_name'] == '':
raise KeyError
else:
+ ssn_conf['pre_defined_firewall'] = True
ssn_conf['firewall_name'] = os.environ['gcp_firewall_name']
except KeyError:
try:
- pre_defined_firewall = True
logging.info('[CREATE FIREWALL]')
print('[CREATE FIREWALL]')
firewall_rules = dict()
@@ -154,7 +158,7 @@
firewall_rules['egress'] = []
ingress_rule = dict()
- ingress_rule['name'] = ssn_conf['firewall_name'] + '-ingress'
+ ingress_rule['name'] = '{}-ingress'.format(ssn_conf['firewall_name'])
ingress_rule['targetTags'] = [ssn_conf['network_tag']]
ingress_rule['sourceRanges'] = [ssn_conf['allowed_ip_cidr']]
rules = [
@@ -169,7 +173,7 @@
firewall_rules['ingress'].append(ingress_rule)
egress_rule = dict()
- egress_rule['name'] = ssn_conf['firewall_name'] + '-egress'
+ egress_rule['name'] = '{}-egress'.format(ssn_conf['firewall_name'])
egress_rule['targetTags'] = [ssn_conf['network_tag']]
egress_rule['destinationRanges'] = [ssn_conf['allowed_ip_cidr']]
rules = [
@@ -190,39 +194,39 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create Firewall.", str(err))
- if pre_defined_vpc:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ dlab.fab.append_result("Failed to create Firewall.", str(err))
+ if not ssn_conf['pre_defined_subnet']:
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
sys.exit(1)
try:
logging.info('[CREATE SERVICE ACCOUNT AND ROLE]')
print('[CREATE SERVICE ACCOUNT AND ROLE]')
- params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} --service_base_name {}".format(
- ssn_conf['service_account_name'], ssn_conf['role_name'],
- ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'], ssn_conf['ssn_unique_index'], ssn_conf['service_base_name'])
+ params = "--service_account_name {} --role_name {} --policy_path {} --roles_path {} --unique_index {} " \
+ "--service_base_name {}".format( ssn_conf['service_account_name'], ssn_conf['role_name'],
+ ssn_conf['ssn_policy_path'], ssn_conf['ssn_roles_path'],
+ ssn_conf['ssn_unique_index'], ssn_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_service_account', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create Service account and role.", str(err))
+ dlab.fab.append_result("Unable to create Service account and role.", str(err))
try:
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
+ GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+ GCPActions.remove_role(ssn_conf['role_name'])
except:
print("Service account hasn't been created")
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ if not ssn_conf['pre_defined_firewall']:
+ GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+ GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+ if not ssn_conf['pre_defined_subnet']:
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
sys.exit(1)
try:
@@ -235,62 +239,62 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to create static ip.", str(err))
+ dlab.fab.append_result("Failed to create static ip.", str(err))
try:
- GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+ GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
except:
print("Static IP address hasn't been created.")
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
+ GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+ GCPActions.remove_role(ssn_conf['role_name'])
+ GCPActions.remove_bucket(ssn_conf['ssn_bucket_name'])
+ GCPActions.remove_bucket(ssn_conf['shared_bucket_name'])
+ if not ssn_conf['pre_defined_firewall']:
+ GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+ GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+ if not ssn_conf['pre_defined_subnet']:
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
sys.exit(1)
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ ssn_conf['initial_user'] = 'ubuntu'
+ ssn_conf['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ ssn_conf['initial_user'] = 'ec2-user'
+ ssn_conf['sudo_group'] = 'wheel'
try:
- ssn_conf['static_ip'] = \
- GCPMeta().get_static_address(ssn_conf['region'], ssn_conf['static_address_name'])['address']
+ ssn_conf['static_ip'] = GCPMeta.get_static_address(ssn_conf['region'],
+ ssn_conf['static_address_name'])['address']
logging.info('[CREATE SSN INSTANCE]')
print('[CREATE SSN INSTANCE]')
params = "--instance_name {0} --region {1} --zone {2} --vpc_name {3} --subnet_name {4} --instance_size {5}"\
" --ssh_key_path {6} --initial_user {7} --service_account_name {8} --image_name {9}"\
- " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' --primary_disk_size {14} --service_base_name {15}".\
+ " --instance_class {10} --static_ip {11} --network_tag {12} --labels '{13}' " \
+ "--primary_disk_size {14} --service_base_name {15}".\
format(ssn_conf['instance_name'], ssn_conf['region'], ssn_conf['zone'], ssn_conf['vpc_name'],
- ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'], initial_user,
- ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn', ssn_conf['static_ip'],
- ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20', ssn_conf['service_base_name'])
+ ssn_conf['subnet_name'], ssn_conf['instance_size'], ssn_conf['ssh_key_path'],
+ ssn_conf['initial_user'], ssn_conf['service_account_name'], ssn_conf['image_name'], 'ssn',
+ ssn_conf['static_ip'], ssn_conf['network_tag'], json.dumps(ssn_conf['instance_labels']), '20',
+ ssn_conf['service_base_name'])
try:
local("~/scripts/{}.py {}".format('common_create_instance', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Unable to create ssn instance.", str(err))
- GCPActions().remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
- GCPActions().remove_role(ssn_conf['role_name'])
- GCPActions().remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
- GCPActions().remove_bucket(ssn_conf['ssn_bucket_name'])
- GCPActions().remove_bucket(ssn_conf['shared_bucket_name'])
- if pre_defined_firewall:
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-ingress')
- GCPActions().remove_firewall(ssn_conf['firewall_name'] + '-egress')
- if pre_defined_subnet:
- GCPActions().remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
- if pre_defined_vpc:
- GCPActions().remove_vpc(ssn_conf['vpc_name'])
- sys.exit(1)
\ No newline at end of file
+ dlab.fab.append_result("Unable to create ssn instance.", str(err))
+ GCPActions.remove_service_account(ssn_conf['service_account_name'], ssn_conf['service_base_name'])
+ GCPActions.remove_role(ssn_conf['role_name'])
+ GCPActions.remove_static_address(ssn_conf['static_address_name'], ssn_conf['region'])
+ GCPActions.remove_bucket(ssn_conf['ssn_bucket_name'])
+ GCPActions.remove_bucket(ssn_conf['shared_bucket_name'])
+ if not ssn_conf['pre_defined_firewall']:
+ GCPActions.remove_firewall('{}-ingress'.format(ssn_conf['firewall_name']))
+ GCPActions.remove_firewall('{}-egress'.format(ssn_conf['firewall_name']))
+ if not ssn_conf['pre_defined_subnet']:
+ GCPActions.remove_subnet(ssn_conf['subnet_name'], ssn_conf['region'])
+ if not ssn_conf['pre_defined_vpc']:
+ GCPActions.remove_vpc(ssn_conf['vpc_name'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
index c033429..3e20a15 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/ssn_terminate.py
@@ -21,11 +21,16 @@
#
# ******************************************************************************
-from dlab.fab import *
-from dlab.actions_lib import *
-import sys, os
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
+import dlab.ssn_lib
+import sys
+import os
+import logging
+import json
+import traceback
from fabric.api import *
-from dlab.ssn_lib import *
if __name__ == "__main__":
local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
@@ -36,8 +41,8 @@
# generating variables dictionary
print('Generating infrastructure names and tags')
ssn_conf = dict()
- ssn_conf['service_base_name'] = replace_multi_symbols(
- os.environ['conf_service_base_name'].lower().replace('_', '-')[:12], '-', True)
+ ssn_conf['service_base_name'] = dlab.fab.replace_multi_symbols(
+ os.environ['conf_service_base_name'].replace('_', '-').lower()[:20], '-', True)
ssn_conf['region'] = os.environ['gcp_region']
ssn_conf['zone'] = os.environ['gcp_zone']
pre_defined_vpc = False
@@ -48,7 +53,7 @@
pre_defined_vpc = True
ssn_conf['vpc_name'] = os.environ['gcp_vpc_name']
except KeyError:
- ssn_conf['vpc_name'] = '{}-ssn-vpc'.format(ssn_conf['service_base_name'])
+ ssn_conf['vpc_name'] = '{}-vpc'.format(ssn_conf['service_base_name'])
try:
logging.info('[TERMINATE SSN]')
@@ -61,8 +66,7 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to terminate ssn.", str(err))
+ dlab.fab.append_result("Failed to terminate ssn.", str(err))
sys.exit(1)
try:
@@ -71,6 +75,6 @@
"Action": "Terminate ssn with all service_base_name environment"}
print(json.dumps(res))
result.write(json.dumps(res))
- except:
- print("Failed writing results.")
- sys.exit(0)
\ No newline at end of file
+ except Exception as err:
+ dlab.fab.append_result("Error with writing results", str(err))
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
index 6cfd891..e43517e 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/superset_configure.py
@@ -25,10 +25,13 @@
import json
import sys
import requests
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import traceback
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
+import uuid
+from fabric.api import *
if __name__ == "__main__":
@@ -38,69 +41,76 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "project_tag": notebook_config['project_tag'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
+
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -108,9 +118,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -127,16 +136,17 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
print('[CONFIGURE KEYCLOAK]')
logging.info('[CONFIGURE KEYCLOAK]')
- keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(os.environ['keycloak_auth_server_url'])
- keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'], os.environ['keycloak_realm_name'])
+ keycloak_auth_server_url = '{}/realms/master/protocol/openid-connect/token'.format(
+ os.environ['keycloak_auth_server_url'])
+ keycloak_client_create_url = '{0}/admin/realms/{1}/clients'.format(os.environ['keycloak_auth_server_url'],
+ os.environ['keycloak_realm_name'])
keycloak_auth_data = {
"username": os.environ['keycloak_user'],
"password": os.environ['keycloak_user_password'],
@@ -150,14 +160,21 @@
"clientId": keycloak_client_id,
}
keycloak_token = requests.post(keycloak_auth_server_url, data=keycloak_auth_data).json()
- keycloak_get_id_client = requests.get(keycloak_client_create_url, data=keycloak_auth_data, params=client_params, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+ keycloak_get_id_client = requests.get(
+ keycloak_client_create_url, data=keycloak_auth_data, params=client_params,
+ headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+ "Content-Type": "application/json"})
json_keycloak_client_id = json.loads(keycloak_get_id_client.text)
# Check, if response is not empty
if len(json_keycloak_client_id) != 0:
print('Keycloak client {} exists. Getting his required attributes.'.format(keycloak_client_id))
keycloak_id_client = json_keycloak_client_id[0]['id']
- keycloak_client_get_secret_url = ("{0}/{1}/client-secret".format(keycloak_client_create_url, keycloak_id_client))
- keycloak_client_get_secret = requests.get(keycloak_client_get_secret_url, data=keycloak_auth_data, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+ keycloak_client_get_secret_url = ("{0}/{1}/client-secret".format(keycloak_client_create_url,
+ keycloak_id_client))
+ keycloak_client_get_secret = requests.get(
+ keycloak_client_get_secret_url, data=keycloak_auth_data,
+ headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")), "Content-Type":
+ "application/json"})
json_keycloak_client_secret = json.loads(keycloak_client_get_secret.text)
keycloak_client_secret = json_keycloak_client_secret['value']
else:
@@ -169,14 +186,16 @@
"redirectUris": ["*"],
"secret": keycloak_client_secret,
}
- keycloak_client = requests.post(keycloak_client_create_url, json=keycloak_client_data, headers={"Authorization": "Bearer " + keycloak_token.get("access_token"), "Content-Type": "application/json"})
+ keycloak_client = requests.post(
+ keycloak_client_create_url, json=keycloak_client_data,
+ headers={"Authorization": "Bearer {}".format(keycloak_token.get("access_token")),
+ "Content-Type": "application/json"})
except Exception as err:
- append_result("Failed to configure keycloak.")
+ dlab.fab.append_result("Failed to configure keycloak.")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure keycloak.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure keycloak.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing and configuring superset
@@ -201,9 +220,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure superset.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure superset.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -212,16 +230,16 @@
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -233,35 +251,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+ print("Looks like another image creating operation for your template have been started a "
+ "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -286,12 +303,11 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -309,9 +325,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy for docker.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy for docker.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -326,47 +341,51 @@
try:
local("~/scripts/superset_start.py {}".format(params))
except:
- traceback.print_exc()
- raise Exception
+ traceback.print_exc()
+ raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to start Superset.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to start Superset.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- superset_ip_url = "http://" + ip_address + ":8088/{}/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- superset_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- superset_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("SUPERSET URL: {}".format(superset_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print("ReverseProxyNotebook".format(superset_notebook_acces_url))
- print("ReverseProxyUngit".format(superset_ungit_acces_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ superset_ip_url = "http://" + ip_address + ":8088/{}/".format(notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ superset_notebook_acces_url = "http://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
+ superset_ungit_acces_url = "http://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("SUPERSET URL: {}".format(superset_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+        print("ReverseProxyNotebook: {}".format(superset_notebook_acces_url))
+        print("ReverseProxyUngit: {}".format(superset_ungit_acces_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
+ notebook_config['dlab_ssh_user'],
+ ip_address))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Superset",
- "url": superset_notebook_acces_url},
- {"description": "Ungit",
- "url": superset_ungit_acces_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Superset",
+ "url": superset_notebook_acces_url},
+ {"description": "Ungit",
+ "url": superset_ungit_acces_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
index f791e74..cef61d1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor-rstudio_configure.py
@@ -24,12 +24,13 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import argparse
import traceback
+from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
@@ -43,68 +44,77 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
-
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['rstudio_pass'] = id_generator()
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ notebook_config['rstudio_pass'] = dlab.fab.id_generator()
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name']),
+ notebook_config['initial_user'], notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
@@ -112,9 +122,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -131,9 +140,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -149,9 +157,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring TensorFlow and RSTUDIO and all dependencies
@@ -172,9 +179,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure tensoflow-rstudio.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+        dlab.fab.append_result("Failed to configure tensorflow-rstudio.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -183,16 +189,16 @@
additional_config = {"user_keyname": os.environ['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
- instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config), notebook_config['dlab_ssh_user'])
+ instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
+ notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -204,21 +210,20 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
@@ -229,11 +234,10 @@
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -258,62 +262,67 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- tensorboard_url = "http://" + ip_address + ":6006/"
- rstudio_ip_url = "http://" + ip_address + ":8787/"
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(notebook_config['exploratory_name'])
- rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("TensorBoard URL: {}".format(tensorboard_url))
- print("TensorBoard log dir: /var/log/tensorboard")
- print("Rstudio URL: {}".format(rstudio_ip_url))
- print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
- print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ tensorboard_url = "http://" + ip_address + ":6006/"
+ rstudio_ip_url = "http://" + ip_address + ":8787/"
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ rstudio_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+ notebook_config['exploratory_name'])
+ rstudio_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("TensorBoard URL: {}".format(tensorboard_url))
+ print("TensorBoard log dir: /var/log/tensorboard")
+ print("Rstudio URL: {}".format(rstudio_ip_url))
+ print("Rstudio user: {}".format(notebook_config['dlab_ssh_user']))
+ print("Rstudio pass: {}".format(notebook_config['rstudio_pass']))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "tensorboard_log_dir": "/var/log/tensorboard",
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Rstudio",
- "url": rstudio_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": rstudio_ungit_access_url}#,
- #{"description": "Rstudio (via tunnel)",
- # "url": rstudio_ip_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensorboard_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ],
- "exploratory_user": notebook_config['dlab_ssh_user'],
- "exploratory_pass": notebook_config['rstudio_pass']}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "tensorboard_log_dir": "/var/log/tensorboard",
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Rstudio",
+ "url": rstudio_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": rstudio_ungit_access_url}#,
+ #{"description": "Rstudio (via tunnel)",
+ # "url": rstudio_ip_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensorboard_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ],
+ "exploratory_user": notebook_config['dlab_ssh_user'],
+ "exploratory_pass": notebook_config['rstudio_pass']}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
index 950a1c0..613b4a1 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/tensor_configure.py
@@ -24,11 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import traceback
+from fabric.api import *
if __name__ == "__main__":
@@ -39,77 +40,84 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
-
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -126,9 +134,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -144,9 +151,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring TensorFlow and all dependencies
@@ -163,9 +169,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure TensorFlow.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure TensorFlow.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -179,12 +184,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -196,21 +200,20 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
@@ -221,11 +224,10 @@
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -250,60 +252,63 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ tensorboard_url = "http://" + ip_address + ":6006/"
+ jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
+ notebook_config['exploratory_name'])
+ jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("TensorBoard URL: {}".format(tensorboard_url))
+ print("TensorBoard log dir: /var/log/tensorboard")
+ print("Jupyter URL: {}".format(jupyter_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- tensorboard_url = "http://" + ip_address + ":6006/"
- jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- jupyter_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(notebook_config['exploratory_name'])
- tensorboard_access_url = "https://" + edge_instance_hostname + "/{}-tensor/".format(
- notebook_config['exploratory_name'])
- jupyter_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("TensorBoard URL: {}".format(tensorboard_url))
- print("TensorBoard log dir: /var/log/tensorboard")
- print("Jupyter URL: {}".format(jupyter_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "tensorboard_log_dir": "/var/log/tensorboard",
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Jupyter",
- "url": jupyter_notebook_access_url},
- {"description": "TensorBoard",
- "url": tensorboard_access_url},
- {"description": "Ungit",
- "url": jupyter_ungit_access_url}#,
- #{"description": "Jupyter (via tunnel)",
- # "url": jupyter_ip_url},
- #{"description": "TensorBoard (via tunnel)",
- # "url": tensorboard_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "tensorboard_log_dir": "/var/log/tensorboard",
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Jupyter",
+ "url": jupyter_notebook_access_url},
+ {"description": "TensorBoard",
+ "url": tensorboard_access_url},
+ {"description": "Ungit",
+ "url": jupyter_ungit_access_url}#,
+ #{"description": "Jupyter (via tunnel)",
+ # "url": jupyter_ip_url},
+ #{"description": "TensorBoard (via tunnel)",
+ # "url": tensorboard_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
index 7981005..d6c4fab 100644
--- a/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
+++ b/infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py
@@ -24,11 +24,12 @@
import logging
import json
import sys
-from dlab.fab import *
-from dlab.meta_lib import *
-from dlab.actions_lib import *
+import dlab.fab
+import dlab.actions_lib
+import dlab.meta_lib
import os
import traceback
+from fabric.api import *
if __name__ == "__main__":
@@ -39,76 +40,84 @@
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
-
- notebook_config = dict()
try:
- notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
- except:
- notebook_config['exploratory_name'] = ''
- notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
- notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
- notebook_config['key_name'] = os.environ['conf_key_name']
- notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
- notebook_config['project_name'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['project_tag'] = (os.environ['project_name']).lower().replace('_', '-')
- notebook_config['endpoint_tag'] = (os.environ['endpoint_name']).lower().replace('_', '-')
- notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], os.environ['endpoint_name'],
- notebook_config['exploratory_name'])
- notebook_config['image_enabled'] = os.environ['conf_image_enabled']
- notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
- if notebook_config['shared_image_enabled'] == 'false':
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], notebook_config['project_name'],
- os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "project_tag": notebook_config['project_tag'],
- "product": "dlab"}
- else:
- notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
- notebook_config['service_base_name'], notebook_config['endpoint_tag'], os.environ['application'])
- notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
- "endpoint_tag": notebook_config['endpoint_tag'],
- "product": "dlab"}
- # generating variables regarding EDGE proxy on Notebook instance
- instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
- notebook_config['project_name'], notebook_config['endpoint_tag'])
- edge_instance_hostname = GCPMeta().get_instance_public_ip_by_name(edge_instance_name)
- edge_instance_private_ip = GCPMeta().get_private_ip_address(edge_instance_name)
- notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
- notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
- notebook_config['zone'] = os.environ['gcp_zone']
+ GCPMeta = dlab.meta_lib.GCPMeta()
+ GCPActions = dlab.actions_lib.GCPActions()
+ notebook_config = dict()
+ try:
+ notebook_config['exploratory_name'] = (os.environ['exploratory_name']).replace('_', '-').lower()
+ except:
+ notebook_config['exploratory_name'] = ''
+ notebook_config['service_base_name'] = (os.environ['conf_service_base_name'])
+ notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
+ notebook_config['key_name'] = os.environ['conf_key_name']
+ notebook_config['edge_user_name'] = (os.environ['edge_user_name'])
+ notebook_config['project_name'] = (os.environ['project_name']).replace('_', '-').lower()
+ notebook_config['project_tag'] = notebook_config['project_name']
+ notebook_config['endpoint_name'] = (os.environ['endpoint_name']).replace('_', '-').lower()
+ notebook_config['endpoint_tag'] = notebook_config['endpoint_name']
+ notebook_config['instance_name'] = '{0}-{1}-{2}-nb-{3}'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'],
+ notebook_config['exploratory_name'])
+ notebook_config['image_enabled'] = os.environ['conf_image_enabled']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ if notebook_config['shared_image_enabled'] == 'false':
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['project_name'], notebook_config['endpoint_name'],
+ os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ "project_tag": notebook_config['project_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ else:
+ notebook_config['expected_primary_image_name'] = '{}-{}-{}-primary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['expected_secondary_image_name'] = '{}-{}-{}-secondary-image'.format(
+ notebook_config['service_base_name'], notebook_config['endpoint_name'], os.environ['application'])
+ notebook_config['image_labels'] = {"sbn": notebook_config['service_base_name'],
+ "endpoint_tag": notebook_config['endpoint_tag'],
+ os.environ['conf_billing_tag_key']: os.environ['conf_billing_tag_value']}
+ # generating variables regarding EDGE proxy on Notebook instance
+ instance_hostname = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ edge_instance_name = '{0}-{1}-{2}-edge'.format(notebook_config['service_base_name'],
+ notebook_config['project_name'],
+ notebook_config['endpoint_name'])
+ edge_instance_hostname = GCPMeta.get_instance_public_ip_by_name(edge_instance_name)
+ edge_instance_private_ip = GCPMeta.get_private_ip_address(edge_instance_name)
+ notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
+ notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
+ notebook_config['zone'] = os.environ['gcp_zone']
+ notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate variables dictionary", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
try:
if os.environ['conf_os_family'] == 'debian':
- initial_user = 'ubuntu'
- sudo_group = 'sudo'
+ notebook_config['initial_user'] = 'ubuntu'
+ notebook_config['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
- initial_user = 'ec2-user'
- sudo_group = 'wheel'
+ notebook_config['initial_user'] = 'ec2-user'
+ notebook_config['sudo_group'] = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
- params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
- (instance_hostname, notebook_config['ssh_key_path'], initial_user,
- notebook_config['dlab_ssh_user'], sudo_group)
-
+ params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
+ instance_hostname, notebook_config['ssh_key_path'], notebook_config['initial_user'],
+ notebook_config['dlab_ssh_user'], notebook_config['sudo_group'])
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating ssh user 'dlab'.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed creating ssh user 'dlab'.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
@@ -125,9 +134,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure proxy.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure proxy.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
@@ -143,9 +151,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing apps: apt & pip.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing apps: apt & pip.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring zeppelin and all dependencies
@@ -179,9 +186,8 @@
traceback.print_exc()
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to configure zeppelin.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to configure zeppelin.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -195,12 +201,11 @@
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
- append_result("Failed installing users key")
+ dlab.fab.append_result("Failed installing users key")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed installing users key.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed installing users key.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
@@ -212,36 +217,34 @@
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
- append_result("Failed setup git credentials")
+ dlab.fab.append_result("Failed setup git credentials")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to setup git credentials.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to setup git credentials.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
-
if notebook_config['image_enabled'] == 'true':
try:
print('[CREATING IMAGE]')
- primary_image_id = GCPMeta().get_image_by_name(notebook_config['expected_primary_image_name'])
+ primary_image_id = GCPMeta.get_image_by_name(notebook_config['expected_primary_image_name'])
if primary_image_id == '':
print("Looks like it's first time we configure notebook server. Creating images.")
- image_id_list = GCPActions().create_image_from_instance_disks(
+ image_id_list = GCPActions.create_image_from_instance_disks(
notebook_config['expected_primary_image_name'], notebook_config['expected_secondary_image_name'],
notebook_config['instance_name'], notebook_config['zone'], notebook_config['image_labels'])
if image_id_list and image_id_list[0] != '':
print("Image of primary disk was successfully created. It's ID is {}".format(image_id_list[0]))
else:
- print("Looks like another image creating operation for your template have been started a moment ago.")
+ print("Looks like another image creating operation for your template have been started a "
+ "moment ago.")
if image_id_list and image_id_list[1] != '':
print("Image of secondary disk was successfully created. It's ID is {}".format(image_id_list[1]))
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed creating image.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
- GCPActions().remove_image(notebook_config['expected_primary_image_name'])
- GCPActions().remove_image(notebook_config['expected_secondary_image_name'])
+ dlab.fab.append_result("Failed creating image.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ GCPActions.remove_image(notebook_config['expected_primary_image_name'])
+ GCPActions.remove_image(notebook_config['expected_secondary_image_name'])
sys.exit(1)
try:
@@ -266,51 +269,53 @@
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
- append_result("Failed edge reverse proxy template")
+ dlab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
- print('Error: {0}'.format(err))
- append_result("Failed to set edge reverse proxy template.", str(err))
- GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ dlab.fab.append_result("Failed to set edge reverse proxy template.", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
+ try:
+ # generating output information
+ ip_address = GCPMeta.get_private_ip_address(notebook_config['instance_name'])
+ zeppelin_ip_url = "http://" + ip_address + ":8080/"
+ ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
+ zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
+ notebook_config['exploratory_name'])
+ zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
+ notebook_config['exploratory_name'])
+ print('[SUMMARY]')
+ logging.info('[SUMMARY]')
+ print("Instance name: {}".format(notebook_config['instance_name']))
+ print("Private IP: {}".format(ip_address))
+ print("Instance type: {}".format(notebook_config['instance_type']))
+ print("Key name: {}".format(notebook_config['key_name']))
+ print("User key name: {}".format(os.environ['project_name']))
+ print("Zeppelin URL: {}".format(zeppelin_ip_url))
+ print("Ungit URL: {}".format(ungit_ip_url))
+ print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(
+ notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
- # generating output information
- ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
- zeppelin_ip_url = "http://" + ip_address + ":8080/"
- ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
- zeppelin_notebook_access_url = "https://" + edge_instance_hostname + "/{}/".format(
- notebook_config['exploratory_name'])
- zeppelin_ungit_access_url = "https://" + edge_instance_hostname + "/{}-ungit/".format(
- notebook_config['exploratory_name'])
- print('[SUMMARY]')
- logging.info('[SUMMARY]')
- print("Instance name: {}".format(notebook_config['instance_name']))
- print("Private IP: {}".format(ip_address))
- print("Instance type: {}".format(notebook_config['instance_type']))
- print("Key name: {}".format(notebook_config['key_name']))
- print("User key name: {}".format(os.environ['project_name']))
- print("Zeppelin URL: {}".format(zeppelin_ip_url))
- print("Ungit URL: {}".format(ungit_ip_url))
- print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
- notebook_config['dlab_ssh_user'],
- ip_address))
-
- with open("/root/result.json", 'w') as result:
- res = {"hostname": ip_address,
- "ip": ip_address,
- "instance_id": notebook_config['instance_name'],
- "master_keyname": os.environ['conf_key_name'],
- "notebook_name": notebook_config['instance_name'],
- "Action": "Create new notebook server",
- "exploratory_url": [
- {"description": "Apache Zeppelin",
- "url": zeppelin_notebook_access_url},
- {"description": "Ungit",
- "url": zeppelin_ungit_access_url}#,
- #{"description": "Apache Zeppelin (via tunnel)",
- # "url": zeppelin_ip_url},
- #{"description": "Ungit (via tunnel)",
- # "url": ungit_ip_url}
- ]}
- result.write(json.dumps(res))
\ No newline at end of file
+ with open("/root/result.json", 'w') as result:
+ res = {"hostname": ip_address,
+ "ip": ip_address,
+ "instance_id": notebook_config['instance_name'],
+ "master_keyname": os.environ['conf_key_name'],
+ "notebook_name": notebook_config['instance_name'],
+ "Action": "Create new notebook server",
+ "exploratory_url": [
+ {"description": "Apache Zeppelin",
+ "url": zeppelin_notebook_access_url},
+ {"description": "Ungit",
+ "url": zeppelin_ungit_access_url}#,
+ #{"description": "Apache Zeppelin (via tunnel)",
+ # "url": zeppelin_ip_url},
+ #{"description": "Ungit (via tunnel)",
+ # "url": ungit_ip_url}
+ ]}
+ result.write(json.dumps(res))
+ except Exception as err:
+ dlab.fab.append_result("Failed to generate output information", str(err))
+ GCPActions.remove_instance(notebook_config['instance_name'], notebook_config['zone'])
+ sys.exit(1)
diff --git a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
index 622297a..ef8f4f8 100644
--- a/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
+++ b/infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
@@ -51,6 +51,7 @@
print('Error: {0}'.format(err))
sys.exit(1)
+
def clean_jupyter():
try:
sudo('systemctl stop jupyter-notebook')
@@ -67,6 +68,7 @@
print('Error: {0}'.format(err))
sys.exit(1)
+
def clean_zeppelin():
try:
sudo('systemctl stop zeppelin-notebook')
@@ -81,6 +83,7 @@
print('Error: {0}'.format(err))
sys.exit(1)
+
def clean_rstudio():
try:
remove_os_pkg(['rstudio-server'])
@@ -90,6 +93,7 @@
print('Error:', str(err))
sys.exit(1)
+
def clean_tensor():
try:
clean_jupyter()
@@ -100,6 +104,7 @@
print('Error: {0}'.format(err))
sys.exit(1)
+
def clean_tensor_rstudio():
try:
clean_rstudio()
@@ -110,6 +115,7 @@
print('Error: {0}'.format(err))
sys.exit(1)
+
if __name__ == "__main__":
print('Configure connections')
env['connection_attempts'] = 100
@@ -117,19 +123,19 @@
env.host_string = args.os_user + '@' + args.hostname
if os.environ['conf_cloud_provider'] == 'azure':
- de_master_name = '{}-{}-de-{}-{}-m'.format(
- os.environ['conf_service_base_name'],
- os.environ['project_name'].replace("_", "-"),
- os.environ['exploratory_name'].replace("_", "-"),
- os.environ['computational_name'].replace("_", "-"))
- de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
- de_master_name)
- default_ami_id = 'default'
- else:
- de_master_name = '{}-{}-de-{}-{}-m'.format(
+ de_master_name = '{}-{}-{}-de-{}-m'.format(
os.environ['conf_service_base_name'],
os.environ['project_name'],
- os.environ['exploratory_name'],
+ os.environ['endpoint_name'],
+ os.environ['computational_name'])
+ de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
+ de_master_name)
+ default_ami_id = 'default'
+ else:
+ de_master_name = '{}-{}-{}-de-{}-m'.format(
+ os.environ['conf_service_base_name'],
+ os.environ['project_name'],
+ os.environ['endpoint_name'],
os.environ['computational_name'])
de_ami_id = get_ami_id_by_instance_name(de_master_name)
default_ami_id = get_ami_id(
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
index 089e316..f8729f1 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_install_libs.py
@@ -63,7 +63,7 @@
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['service_base_name'] = os.environ['conf_service_base_name']
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
index 0cc5f6e..9f2b18b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_list_libs.py
@@ -46,7 +46,7 @@
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['service_base_name'] = os.environ['conf_service_base_name']
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['master_ip'] = get_instance_private_ip_address(
diff --git a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
index 006a313..425b12b 100644
--- a/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/dataengine_reconfigure_spark.py
@@ -63,7 +63,7 @@
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['service_base_name'] = os.environ['conf_service_base_name']
- data_engine['tag_name'] = data_engine['service_base_name'] + '-Tag'
+ data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
index 23f889f..eaf9ea5 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_git_creds.py
@@ -45,8 +45,8 @@
notebook_config = dict()
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['os_user'] = os.environ['conf_os_user']
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['notebook_ip'] = get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
index 59074f8..5e51179 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_inactivity_check.py
@@ -44,8 +44,8 @@
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['os_user'] = os.environ['conf_os_user']
notebook_config['resource_type'] = os.environ['conf_resource']
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['notebook_ip'] = get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
index 17abe27..b56157a 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_install_libs.py
@@ -47,8 +47,8 @@
try:
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['os_user'] = os.environ['conf_os_user']
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['notebook_ip'] = get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
index 839f3f9..820c818 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_list_libs.py
@@ -46,8 +46,8 @@
try:
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['os_user'] = os.environ['conf_os_user']
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['notebook_ip'] = get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
index 849333c..8bc607c 100644
--- a/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
+++ b/infrastructure-provisioning/src/general/scripts/os/notebook_reconfigure_spark.py
@@ -48,7 +48,7 @@
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['os_user'] = os.environ['conf_os_user']
- notebook_config['service_base_name'] = os.environ['conf_service_base_name']
- notebook_config['tag_name'] = notebook_config['service_base_name'] + '-Tag'
+ notebook_config['service_base_name'] = os.environ['conf_service_base_name'].lower()
+ notebook_config['tag_name'] = notebook_config['service_base_name'] + '-tag'
notebook_config['notebook_ip'] = get_instance_private_ip_address(
notebook_config['tag_name'], notebook_config['notebook_name'])
notebook_config['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
diff --git a/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
index 90f0e5f..ff3e46d 100644
--- a/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
+++ b/infrastructure-provisioning/src/general/templates/os/renew_certificates.sh
@@ -40,4 +40,4 @@
keytool -importcert -trustcacerts -alias step-ca -file /etc/ssl/certs/root_ca.crt -noprompt -storepass changeit -keystore JAVA_HOME/lib/security/cacerts
# Restarting service
-supervisorctl restart provserv
\ No newline at end of file
+supervisorctl restart all
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
index 3cfa0ac..94ad123 100644
--- a/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
+++ b/infrastructure-provisioning/src/jupyter/scripts/configure_jupyter_node.py
@@ -38,7 +38,7 @@
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--scala_version', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -134,7 +134,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# INSTALL OPTIONAL PACKAGES
print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
index 5f56ea8..1486ff3 100644
--- a/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
+++ b/infrastructure-provisioning/src/jupyterlab/scripts/configure_jupyterlab_node.py
@@ -39,7 +39,7 @@
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--scala_version', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
args = parser.parse_args()
@@ -113,4 +113,4 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
\ No newline at end of file
+ install_inactivity_checker(args.os_user, args.ip_address)
\ No newline at end of file
diff --git a/infrastructure-provisioning/src/project/fabfile.py b/infrastructure-provisioning/src/project/fabfile.py
index 5949469..385704e 100644
--- a/infrastructure-provisioning/src/project/fabfile.py
+++ b/infrastructure-provisioning/src/project/fabfile.py
@@ -45,13 +45,6 @@
append_result("Failed preparing Project.", str(err))
sys.exit(1)
-# try:
-# local("~/scripts/{}.py".format('edge_prepare'))
-# except Exception as err:
-# traceback.print_exc()
-# append_result("Failed preparing Edge node.", str(err))
-# sys.exit(1)
-
try:
local("~/scripts/{}.py".format('edge_configure'))
except Exception as err:
diff --git a/infrastructure-provisioning/src/project/scripts/configure_keycloak.py b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
index a40bad2..80e7501 100644
--- a/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
+++ b/infrastructure-provisioning/src/project/scripts/configure_keycloak.py
@@ -65,10 +65,11 @@
keycloak_client_name = "{0}-{1}-{2}".format(args.service_base_name, args.project_name, args.endpoint_name)
keycloak_client_id = str(uuid.uuid4())
if args.hostname == '':
- keycloak_redirectUris = 'https://{0}/*,http://{0}/*'.format(args.edge_public_ip).split(',')
+ keycloak_redirectUris = 'https://{0}/*,http://{0}/*'.format(args.edge_public_ip).lower().split(',')
print(keycloak_redirectUris)
else:
- keycloak_redirectUris = 'https://{0}/*,http://{0}/*,https://{1}/*,http://{1}/*'.format(args.edge_public_ip, args.hostname).split(',')
+ keycloak_redirectUris = 'https://{0}/*,http://{0}/*,https://{1}/*,http://{1}/*'.format(
+ args.edge_public_ip, args.hostname).lower().split(',')
keycloak_client_data = {
"clientId": keycloak_client_name,
"id": keycloak_client_id,
diff --git a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
index a2878b8..34fb007 100644
--- a/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
+++ b/infrastructure-provisioning/src/rstudio/scripts/configure_rstudio_node.py
@@ -40,7 +40,7 @@
parser.add_argument('--rstudio_pass', type=str, default='')
parser.add_argument('--rstudio_version', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -113,7 +113,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress, True)
+ install_inactivity_checker(args.os_user, args.ip_address, True)
#POST INSTALLATION PROCESS
print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
index b0a4a0a..a001da2 100644
--- a/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
+++ b/infrastructure-provisioning/src/ssn/files/azure/mongo_roles.json
@@ -1,7 +1,7 @@
[
{
"_id": "nbShapes_Standard_NC6_fetching",
- "description": "Allow to use Standard_NC6 instance shape for notebook",
+ "description": "Use Standard_NC6 instance shape for notebook",
"exploratory_shapes": [
"Standard_NC6"
],
diff --git a/infrastructure-provisioning/src/ssn/scripts/configure_ui.py b/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
index ad3b7ed..fdca046 100644
--- a/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
+++ b/infrastructure-provisioning/src/ssn/scripts/configure_ui.py
@@ -149,7 +149,7 @@
'\'use_ldap\': false'))
sudo('echo "N" | npm install')
- sudo('npm run build.prod')
+ manage_npm_pkg('run build.prod')
sudo('sudo chown -R {} {}/*'.format(args.os_user, args.dlab_path))
# Building Back-end
diff --git a/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
index a156f2f..7a1a359 100644
--- a/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
+++ b/infrastructure-provisioning/src/superset/scripts/configure_superset_node.py
@@ -42,7 +42,7 @@
parser.add_argument('--edge_instance_private_ip', type=str, default='')
parser.add_argument('--edge_instance_public_ip', type=str, default='')
parser.add_argument('--superset_name', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
args = parser.parse_args()
gitlab_certfile = os.environ['conf_gitlab_certfile']
@@ -81,7 +81,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# PREPARE SUPERSET
try:
diff --git a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
index fb579ad..93d8b55 100644
--- a/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
+++ b/infrastructure-provisioning/src/tensor-rstudio/scripts/configure_tensor-rstudio_node.py
@@ -40,7 +40,7 @@
parser.add_argument('--rstudio_pass', type=str, default='')
parser.add_argument('--rstudio_version', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -131,7 +131,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# POST INSTALLATION PROCESS
print("Updating pyOpenSSL library")
diff --git a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
index 43e220e..b595d9e 100644
--- a/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
+++ b/infrastructure-provisioning/src/tensor/scripts/configure_tensor_node.py
@@ -37,7 +37,7 @@
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -134,7 +134,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# INSTALL OPTIONAL PACKAGES
print("Installing additional Python packages")
diff --git a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
index 9a8be88..fda8b1f 100644
--- a/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
+++ b/infrastructure-provisioning/src/zeppelin/scripts/configure_zeppelin_node.py
@@ -50,7 +50,7 @@
parser.add_argument('--multiple_clusters', type=str, default='')
parser.add_argument('--r_mirror', type=str, default='')
parser.add_argument('--endpoint_url', type=str, default='')
-parser.add_argument('--ip_adress', type=str, default='')
+parser.add_argument('--ip_address', type=str, default='')
parser.add_argument('--exploratory_name', type=str, default='')
parser.add_argument('--edge_ip', type=str, default='')
args = parser.parse_args()
@@ -256,7 +256,7 @@
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
- install_inactivity_checker(args.os_user, args.ip_adress)
+ install_inactivity_checker(args.os_user, args.ip_address)
# INSTALL OPTIONAL PACKAGES
if os.environ['notebook_r_enabled'] == 'true':
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
index 1c7117f..aac7afb 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/ami/ami.tf
@@ -28,7 +28,7 @@
source_instance_id = var.source_instance_id
tags {
Name = local.ami_name
- "${var.sbn}-Tag" = local.ami_name
+ "${var.sbn}-tag" = local.ami_name
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
index 6624f30..259bb6c 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/iam.tf
@@ -46,7 +46,7 @@
tags = {
Name = local.role_name
Environment_tag = var.sbn
- "${var.sbn}-Tag" = local.role_name
+ "${var.sbn}-tag" = local.role_name
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
index 297cf28..2b3c1fb 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/common/network.tf
@@ -30,7 +30,7 @@
tags = {
Name = local.subnet_name
- "${var.sbn}-Tag" = local.subnet_name
+ "${var.sbn}-tag" = local.subnet_name
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
@@ -68,7 +68,7 @@
tags = {
Name = local.sg_name
- "${var.sbn}-Tag" = local.sg_name
+ "${var.sbn}-tag" = local.sg_name
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
index 7601e35..12532c1 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/data_engine/instance.tf
@@ -35,7 +35,7 @@
Name = "${local.cluster_name}-m"
Type = "master"
dataengine_notebook_name = local.notebook_name
- "${var.sbn}-Tag" = "${local.cluster_name}-m"
+ "${var.sbn}-tag" = "${local.cluster_name}-m"
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
@@ -59,7 +59,7 @@
Name = "${local.cluster_name}-s${count.index + 1}"
Type = "slave"
dataengine_notebook_name = local.notebook_name
- "${var.sbn}-Tag" = "${local.cluster_name}-s${count.index + 1}"
+ "${var.sbn}-tag" = "${local.cluster_name}-s${count.index + 1}"
Product = var.product
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
index 4a03b2d..10f5506 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/emr/instance.tf
@@ -63,7 +63,7 @@
Name = local.cluster_name
Notebook = local.notebook_name
Product = var.product
- "${var.sbn}-Tag" = local.cluster_name
+ "${var.sbn}-tag" = local.cluster_name
Project_name = var.project_name
Project_tag = var.project_tag
User_tag = var.user_tag
diff --git a/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
index 374d6da..64d1d4f 100644
--- a/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/computational_resources/modules/notebook/instance.tf
@@ -32,7 +32,7 @@
iam_instance_profile = var.iam_profile_name
tags = {
Name = local.node_name
- "${var.sbn}-Tag" = local.node_name
+ "${var.sbn}-tag" = local.node_name
Project_name = var.project_name
Project_tag = var.project_tag
Endpoint_Tag = var.endpoint_tag
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
new file mode 100644
index 0000000..8a930e0
--- /dev/null
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/bucket.tf
@@ -0,0 +1,37 @@
+# *****************************************************************************
+ #
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+ #
+ # ******************************************************************************
+
+ locals {
+ shared_s3_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+ }
+
+ resource "aws_s3_bucket" "shared_bucket" {
+ bucket = local.shared_s3_name
+ acl = "private"
+ tags = {
+ Name = local.shared_s3_name
+ "${local.additional_tag[0]}" = local.additional_tag[1]
+ "${var.tag_resource_id}" = "${var.service_base_name}:${local.shared_s3_name}"
+ "${var.service_base_name}-tag" = local.shared_s3_name
+ "endpoint_tag" = var.endpoint_id
+ }
+ force_destroy = true
+ }
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
index 49d2353..e4c1e69 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/iam.tf
@@ -36,7 +36,7 @@
Name = local.endpoint_role_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_role_name}"
- "${var.service_base_name}-Tag" = local.endpoint_role_name
+ "${var.service_base_name}-tag" = local.endpoint_role_name
}
}
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
index 5c023f5..6bfc09b 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/instance.tf
@@ -39,7 +39,7 @@
Name = local.endpoint_instance_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_instance_name}"
- "${var.service_base_name}-Tag" = local.endpoint_instance_name
+ "${var.service_base_name}-tag" = local.endpoint_instance_name
endpoint_id = var.endpoint_id
}
}
diff --git a/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
index 4e0fc05..661080f 100644
--- a/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/aws/endpoint/main/network.tf
@@ -22,10 +22,10 @@
locals {
endpoint_subnet_name = "${var.service_base_name}-${var.endpoint_id}-subnet"
endpoint_sg_name = "${var.service_base_name}-${var.endpoint_id}-sg"
- endpoint_vpc_name = "${var.service_base_name}-endpoint-vpc"
+ endpoint_vpc_name = "${var.service_base_name}-${var.endpoint_id}-vpc"
additional_tag = split(":", var.additional_tag)
endpoint_igw_name = "${var.service_base_name}-${var.endpoint_id}-igw"
- endpoint_ip_name = "${var.service_base_name}-${var.endpoint_id}-eip"
+ endpoint_ip_name = "${var.service_base_name}-${var.endpoint_id}-static-ip"
projects_rt = "${var.service_base_name}-${var.endpoint_id}-project-rt"
}
@@ -40,7 +40,7 @@
Name = local.endpoint_vpc_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_vpc_name}"
- "${var.service_base_name}-Tag" = local.endpoint_vpc_name
+ "${var.service_base_name}-tag" = local.endpoint_vpc_name
}
}
@@ -55,7 +55,7 @@
Name = local.endpoint_igw_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_igw_name}"
- "${var.service_base_name}-Tag" = local.endpoint_igw_name
+ "${var.service_base_name}-tag" = local.endpoint_igw_name
}
}
@@ -67,7 +67,7 @@
Name = local.endpoint_subnet_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_subnet_name}"
- "${var.service_base_name}-Tag" = local.endpoint_subnet_name
+ "${var.service_base_name}-tag" = local.endpoint_subnet_name
}
count = var.vpc_id == "" ? 1 : 0
}
@@ -125,7 +125,7 @@
Name = local.endpoint_sg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_sg_name}"
- "${var.service_base_name}-Tag" = local.endpoint_sg_name
+ "${var.service_base_name}-tag" = local.endpoint_sg_name
}
}
@@ -135,7 +135,7 @@
Name = local.endpoint_ip_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_ip_name}"
- "${var.service_base_name}-Tag" = local.endpoint_ip_name
+ "${var.service_base_name}-tag" = local.endpoint_ip_name
}
}
@@ -145,8 +145,8 @@
Name = local.projects_rt
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.projects_rt}"
- "${var.service_base_name}-Tag" = local.projects_rt
- "${var.service_base_name}-Tag" = var.service_base_name
+ "${var.service_base_name}-tag" = local.projects_rt
+ Environment_tag = var.service_base_name
}
}
diff --git a/infrastructure-provisioning/terraform/aws/project/main/iam.tf b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
index 42fc02b..5aa83e2 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/iam.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/iam.tf
@@ -50,7 +50,7 @@
Name = "${local.edge_role_name}"
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_role_name}"
- "${var.service_base_name}-Tag" = local.edge_role_name
+ "${var.service_base_name}-tag" = local.edge_role_name
}
}
@@ -80,7 +80,7 @@
tags = {
Name = local.nb_role_name
Environment_tag = var.service_base_name
- "${var.service_base_name}-Tag" = local.nb_role_name
+ "${var.service_base_name}-tag" = local.nb_role_name
"${local.additional_tag[0]}" = local.additional_tag[1]
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/project/main/instance.tf b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
index 7b4cddc..1220743 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/instance.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/instance.tf
@@ -39,7 +39,7 @@
Name = local.edge_instance_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_instance_name}"
- "${var.service_base_name}-Tag" = local.edge_instance_name
+ "${var.service_base_name}-tag" = local.edge_instance_name
"Endpoint_tag" = var.endpoint_tag
}
}
diff --git a/infrastructure-provisioning/terraform/aws/project/main/network.tf b/infrastructure-provisioning/terraform/aws/project/main/network.tf
index d1064cd..aac8339 100644
--- a/infrastructure-provisioning/terraform/aws/project/main/network.tf
+++ b/infrastructure-provisioning/terraform/aws/project/main/network.tf
@@ -38,7 +38,7 @@
Name = local.edge_ip_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_ip_name}"
- "${var.service_base_name}-Tag" = local.edge_ip_name
+ "${var.service_base_name}-tag" = local.edge_ip_name
}
}
@@ -211,7 +211,7 @@
Name = local.edge_sg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.edge_sg_name}"
- "${var.service_base_name}-Tag" = local.edge_sg_name
+ "${var.service_base_name}-tag" = local.edge_sg_name
}
}
@@ -225,7 +225,7 @@
tags = {
Name = local.nb_subnet_name
- "${local.sbn}-Tag" = local.nb_subnet_name
+ "${local.sbn}-tag" = local.nb_subnet_name
"${local.additional_tag[0]}" = local.additional_tag[1]
Project_name = var.project_name
Project_tag = var.project_tag
@@ -263,7 +263,7 @@
tags = {
Name = local.sg_name
- "${local.sbn}-Tag" = local.sg_name
+ "${local.sbn}-tag" = local.sg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
Project_name = var.project_name
Project_tag = var.project_tag
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
index 792c950..41dfb20 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/auto_scaling_groups.tf
@@ -21,8 +21,8 @@
locals {
subnet_c_id = data.aws_subnet.k8s-subnet-c-data == [] ? "" : data.aws_subnet.k8s-subnet-c-data.0.id
- ssn_k8s_launch_conf_masters_name = "${var.service_base_name}-ssn-launch-conf-masters"
- ssn_k8s_launch_conf_workers_name = "${var.service_base_name}-ssn-launch-conf-workers"
+ ssn_k8s_launch_conf_masters_name = "${var.service_base_name}-ssn-lc-masters"
+ ssn_k8s_launch_conf_workers_name = "${var.service_base_name}-ssn-lc-workers"
ssn_k8s_ag_masters_name = "${var.service_base_name}-ssn-masters"
ssn_k8s_ag_workers_name = "${var.service_base_name}-ssn-workers"
cluster_name = "${var.service_base_name}-k8s-cluster"
@@ -122,7 +122,7 @@
propagate_at_launch = true
},
{
- key = "${var.service_base_name}-Tag"
+ key = "${var.service_base_name}-tag"
value = local.ssn_k8s_ag_masters_name
propagate_at_launch = true
},
@@ -162,7 +162,7 @@
propagate_at_launch = true
},
{
- key = "${var.service_base_name}-Tag"
+ key = "${var.service_base_name}-tag"
value = local.ssn_k8s_ag_workers_name
propagate_at_launch = true
},
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
index 4000cdd..bd0baf8 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/lb.tf
@@ -36,7 +36,7 @@
Name = local.ssn_nlb_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_nlb_name}"
- "${var.service_base_name}-Tag" = local.ssn_nlb_name
+ "${var.service_base_name}-tag" = local.ssn_nlb_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -50,7 +50,7 @@
Name = local.ssn_k8s_nlb_api_tg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_k8s_nlb_api_tg_name}"
- "${var.service_base_name}-Tag" = local.ssn_k8s_nlb_api_tg_name
+ "${var.service_base_name}-tag" = local.ssn_k8s_nlb_api_tg_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -64,7 +64,7 @@
Name = local.ssn_k8s_nlb_step_ca_tg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_k8s_nlb_step_ca_tg_name}"
- "${var.service_base_name}-Tag" = local.ssn_k8s_nlb_step_ca_tg_name
+ "${var.service_base_name}-tag" = local.ssn_k8s_nlb_step_ca_tg_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
index 79c5969..e01b1d6 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/role_policy.tf
@@ -41,7 +41,7 @@
Name = local.ssn_role_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_role_name}"
- "${var.service_base_name}-Tag" = local.ssn_role_name
+ "${var.service_base_name}-tag" = local.ssn_role_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
index 622956b..f91e6ca 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/s3.tf
@@ -30,7 +30,7 @@
Name = local.ssn_s3_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_s3_name}"
- "${var.service_base_name}-Tag" = local.ssn_s3_name
+ "${var.service_base_name}-tag" = local.ssn_s3_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
force_destroy = true
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
index 54c1648..b9f7fa8 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/security_groups.tf
@@ -59,7 +59,7 @@
Name = local.ssn_sg_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_sg_name}"
- "${var.service_base_name}-Tag" = local.ssn_sg_name
+ "${var.service_base_name}-tag" = local.ssn_sg_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
index 2ce9d08..699dfcd 100644
--- a/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
+++ b/infrastructure-provisioning/terraform/aws/ssn-k8s/main/vpc.tf
@@ -21,8 +21,8 @@
locals {
additional_tag = split(":", var.additional_tag)
- ssn_vpc_name = "${var.service_base_name}-ssn-vpc"
- ssn_igw_name = "${var.service_base_name}-ssn-igw"
+ ssn_vpc_name = "${var.service_base_name}-vpc"
+ ssn_igw_name = "${var.service_base_name}-igw"
ssn_subnet_a_name = "${var.service_base_name}-ssn-subnet-az-a"
ssn_subnet_b_name = "${var.service_base_name}-ssn-subnet-az-b"
ssn_subnet_c_name = "${var.service_base_name}-ssn-subnet-az-c"
@@ -41,7 +41,7 @@
Name = local.ssn_vpc_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_vpc_name}"
- "${var.service_base_name}-Tag" = local.ssn_vpc_name
+ "${var.service_base_name}-tag" = local.ssn_vpc_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -54,7 +54,7 @@
Name = local.ssn_igw_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_igw_name}"
- "${var.service_base_name}-Tag" = local.ssn_igw_name
+ "${var.service_base_name}-tag" = local.ssn_igw_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -81,7 +81,7 @@
Name = local.ssn_subnet_a_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_subnet_a_name}"
- "${var.service_base_name}-Tag" = local.ssn_subnet_a_name
+ "${var.service_base_name}-tag" = local.ssn_subnet_a_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -97,7 +97,7 @@
Name = local.ssn_subnet_b_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_subnet_b_name}"
- "${var.service_base_name}-Tag" = local.ssn_subnet_b_name
+ "${var.service_base_name}-tag" = local.ssn_subnet_b_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -113,7 +113,7 @@
Name = local.ssn_subnet_c_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.ssn_subnet_c_name}"
- "${var.service_base_name}-Tag" = local.ssn_subnet_c_name
+ "${var.service_base_name}-tag" = local.ssn_subnet_c_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
@@ -135,7 +135,7 @@
vpc_id = data.aws_vpc.ssn_k8s_vpc_data.id
tags = {
Name = local.endpoint_rt_name
- "${var.service_base_name}-Tag" = var.service_base_name
+ "${var.service_base_name}-tag" = var.service_base_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_rt_name}"
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
@@ -149,7 +149,7 @@
Name = local.endpoint_s3_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_s3_name}"
- "${var.service_base_name}-Tag" = local.endpoint_s3_name
+ "${var.service_base_name}-tag" = local.endpoint_s3_name
"kubernetes.io/cluster/${local.cluster_name}" = "owned"
}
}
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
index 470e474..d76a16c 100644
--- a/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/data_engine/instance.tf
@@ -57,7 +57,7 @@
vm_size = var.master_shape
storage_os_disk {
- name = "${local.cluster_name}-m-disk0"
+ name = "${local.cluster_name}-m-volume-primary"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
@@ -129,7 +129,7 @@
vm_size = var.slave_shape
storage_os_disk {
- name = "${local.notebook_name}-s-${count.index + 1}-disk0"
+ name = "${local.notebook_name}-s-${count.index + 1}-volume-primary"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
index 0283038..70e1db5 100644
--- a/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/computational_resources/modules/notebook/instance.tf
@@ -58,7 +58,7 @@
vm_size = var.instance_type
storage_os_disk {
- name = "${local.node_name}-disk0"
+ name = "${local.node_name}-volume-primary"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
@@ -106,7 +106,7 @@
vm_size = var.instance_type
storage_os_disk {
- name = "${local.node_name}-disk0"
+ name = "${local.node_name}-volume-primary"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
new file mode 100644
index 0000000..a44a37f
--- /dev/null
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/bucket.tf
@@ -0,0 +1,53 @@
+# *****************************************************************************
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# ******************************************************************************
+
+locals {
+ shared_bucket_name = lower("${var.service_base_name}-${var.endpoint_id}-shared-bucket")
+}
+
+resource "random_string" "shared_bucket_service_name" {
+ length = 10
+ special = false
+ lower = true
+ upper = false
+}
+
+resource "azurerm_storage_account" "shared-endpoint-storage-account" {
+ name = random_string.shared_bucket_service_name.result
+ resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
+ location = data.azurerm_resource_group.data-endpoint-resource-group.location
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+ account_kind = "BlobStorage"
+
+ tags = {
+ Name = local.shared_bucket_name
+ "${local.additional_tag[0]}" = local.additional_tag[1]
+ "${var.service_base_name}-tag" = local.shared_bucket_name
+ "endpoint_tag" = var.endpoint_id
+ }
+}
+
+resource "azurerm_storage_container" "shared-endpoint-storage-container" {
+ name = local.shared_bucket_name
+ storage_account_name = azurerm_storage_account.shared-endpoint-storage-account.name
+ container_access_type = "private"
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
index 989c32c..82c1497 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/instance.tf
@@ -21,7 +21,7 @@
locals {
endpoint_instance_name = "${var.service_base_name}-${var.endpoint_id}-endpoint"
- endpoint_instance_disk_name = "${var.service_base_name}-${var.endpoint_id}-endpoint-disk"
+ endpoint_instance_disk_name = "${var.service_base_name}-${var.endpoint_id}-endpoint-volume"
}
data "tls_public_key" "enpoint_key" {
@@ -65,6 +65,6 @@
Name = local.endpoint_instance_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_instance_name}"
- "${var.service_base_name}-Tag" = local.endpoint_instance_name
+ "${var.service_base_name}-tag" = local.endpoint_instance_name
}
}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
index 5d832fa..cbf2187 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/main.tf
@@ -20,10 +20,12 @@
# ******************************************************************************
locals {
- json_data = jsondecode(file(var.auth_file_path))
+ resource_group_name = "${var.service_base_name}-${var.endpoint_id}-resource-group"
+ json_data = jsondecode(file(var.auth_file_path))
}
provider "azurerm" {
+ features {}
subscription_id = local.json_data.subscriptionId
client_id = local.json_data.clientId
client_secret = local.json_data.clientSecret
@@ -31,8 +33,8 @@
}
resource "azurerm_resource_group" "endpoint-resource-group" {
- count = var.resource_group_name == "" ? 1 : 0
- name = var.service_base_name
+ count = var.resource_group_name == "" ? 1 : 0
+ name = local.resource_group_name
location = var.region
tags = {
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
index ac782f9..738f062 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/network.tf
@@ -21,9 +21,9 @@
locals {
endpoint_subnet_name = "${var.service_base_name}-${var.endpoint_id}-subnet"
- endpoint_vpc_name = "${var.service_base_name}-endpoint-vpc"
+ endpoint_vpc_name = "${var.service_base_name}-${var.endpoint_id}-vpc"
additional_tag = split(":", var.additional_tag)
- endpoint_ip_name = "${var.service_base_name}-${var.endpoint_id}-eip"
+ endpoint_ip_name = "${var.service_base_name}-${var.endpoint_id}-static-ip"
endpoint_nif_name = "${var.service_base_name}-${var.endpoint_id}-nif"
}
@@ -38,7 +38,7 @@
Name = local.endpoint_vpc_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_vpc_name}"
- "${var.service_base_name}-Tag" = local.endpoint_vpc_name
+ "${var.service_base_name}-tag" = local.endpoint_vpc_name
}
}
@@ -71,7 +71,7 @@
Name = local.endpoint_ip_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_ip_name}"
- "${var.service_base_name}-Tag" = local.endpoint_ip_name
+ "${var.service_base_name}-tag" = local.endpoint_ip_name
}
}
@@ -79,7 +79,6 @@
name = local.endpoint_nif_name
location = data.azurerm_resource_group.data-endpoint-resource-group.location
resource_group_name = data.azurerm_resource_group.data-endpoint-resource-group.name
- network_security_group_id = azurerm_network_security_group.enpoint-sg.id
ip_configuration {
name = "configuration"
@@ -93,6 +92,12 @@
Name = local.endpoint_nif_name
"${local.additional_tag[0]}" = local.additional_tag[1]
"${var.tag_resource_id}" = "${var.service_base_name}:${local.endpoint_nif_name}"
- "${var.service_base_name}-Tag" = local.endpoint_nif_name
+ "${var.service_base_name}-tag" = local.endpoint_nif_name
}
}
+
+resource "azurerm_network_interface_security_group_association" "endpoint-nif-sg" {
+ network_interface_id = azurerm_network_interface.endpoint-nif.id
+ network_security_group_id = azurerm_network_security_group.enpoint-sg.id
+ depends_on = [azurerm_virtual_machine.endpoint_instance]
+}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
index 793917d..c005b29 100644
--- a/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
+++ b/infrastructure-provisioning/terraform/azure/endpoint/main/outputs.tf
@@ -33,4 +33,12 @@
output "ssn_k8s_sg_id" {
value = azurerm_network_security_group.enpoint-sg.name
+}
+
+output "endpoint_id" {
+ value = var.endpoint_id
+}
+
+output "resource_group_name" {
+ value = data.azurerm_resource_group.data-endpoint-resource-group.name
}
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/azure/project/main/instance.tf b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
index d7ec3aa..34cd26b 100644
--- a/infrastructure-provisioning/terraform/azure/project/main/instance.tf
+++ b/infrastructure-provisioning/terraform/azure/project/main/instance.tf
@@ -59,7 +59,7 @@
vm_size = var.instance_type
storage_os_disk {
- name = "${local.node_name}-disk0"
+ name = "${local.node_name}-volume-primary"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
diff --git a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
index 00111db..814cad8 100644
--- a/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
+++ b/infrastructure-provisioning/terraform/bin/deploy/endpoint_fab.py
@@ -320,7 +320,7 @@
conn.put('./provisioning.yml', '{}provisioning.yml'
.format(dlab_conf_dir))
if args.resource_group_name == '':
- args.resource_group_name = args.service_base_name
+ args.resource_group_name = '{}-{}-resource-group'.format(args.service_base_name, args.endpoint_id)
if args.cloud_provider == 'azure':
args.region = args.region.lower().replace(' ', '')
cloud_properties = [
@@ -654,6 +654,7 @@
parser.add_argument('--dlab_path', type=str, default='/opt/dlab')
parser.add_argument('--key_name', type=str, default='', help='Name of admin key without .pem extension')
parser.add_argument('--endpoint_eip_address', type=str)
+ parser.add_argument('--endpoint_id', type=str, default='')
parser.add_argument('--pkey', type=str, default='')
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--os_user', type=str, default='dlab-user')
diff --git a/infrastructure-provisioning/terraform/bin/dlab.py b/infrastructure-provisioning/terraform/bin/dlab.py
index 07214c8..bd24425 100644
--- a/infrastructure-provisioning/terraform/bin/dlab.py
+++ b/infrastructure-provisioning/terraform/bin/dlab.py
@@ -475,8 +475,8 @@
def validate_params(self):
params = self.parse_args()[self.terraform_args_group_name]
- if len(params.get('service_base_name')) > 12:
- sys.stderr.write('service_base_name length should be less then 12')
+ if len(params.get('service_base_name')) > 20:
+ sys.stderr.write('service_base_name length should be less then 20')
sys.exit(1)
if not re.match("^[a-z0-9\-]+$", params.get('service_base_name')):
sys.stderr.write('service_base_name should contain only lowercase '
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
new file mode 100644
index 0000000..9551d65
--- /dev/null
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/bucket.tf
@@ -0,0 +1,36 @@
+# *****************************************************************************
+ #
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+ #
+ # ******************************************************************************
+
+ locals {
+ shared_bucket_name = "${var.service_base_name}-${var.endpoint_id}-shared-bucket"
+ additional_tag = split(":", var.additional_tag)
+ }
+
+ resource "google_storage_bucket" "shared_bucket" {
+ name = local.shared_bucket_name
+ force_destroy = true
+ labels = {
+ name = local.shared_bucket_name
+ "${local.additional_tag[0]}" = local.additional_tag[1]
+ "${var.service_base_name}-tag" = local.shared_bucket_name
+ "endpoint_tag" = var.endpoint_id
+ }
+ }
\ No newline at end of file
diff --git a/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
index c84621f..cd965ef 100644
--- a/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
+++ b/infrastructure-provisioning/terraform/gcp/endpoint/main/network.tf
@@ -22,8 +22,8 @@
locals {
vpc_id = "${var.service_base_name}-${var.endpoint_id}-vpc"
subnet_name = "${var.service_base_name}-${var.endpoint_id}-subnet"
- firewall_ingress_name = "${var.service_base_name}-${var.endpoint_id}-ing-rule"
- firewall_egress_name = "${var.service_base_name}-${var.endpoint_id}-eg-rule"
+ firewall_ingress_name = "${var.service_base_name}-${var.endpoint_id}-ingress-sg"
+ firewall_egress_name = "${var.service_base_name}-${var.endpoint_id}-egress-sg"
}
resource "google_compute_network" "endpoint_vpc" {
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
index b1d1e7d..21bdf0a 100644
--- a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/gke.tf
@@ -21,7 +21,7 @@
locals {
additional_tag = split(":", var.additional_tag)
- gke_name = "${var.service_base_name}-cluster"
+ gke_name = "${var.service_base_name}-k8s-cluster"
gke_node_pool_name = "${var.service_base_name}-node-pool"
}
diff --git a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
index 04f7ec7..c3bbdcb 100644
--- a/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
+++ b/infrastructure-provisioning/terraform/gcp/ssn-gke/main/modules/gke/vpc.tf
@@ -20,8 +20,8 @@
# ******************************************************************************
locals {
- ssn_vpc_name = "${var.service_base_name}-ssn-vpc"
- ssn_subnet_name = "${var.service_base_name}-ssn-subnet"
+ ssn_vpc_name = "${var.service_base_name}-vpc"
+ ssn_subnet_name = "${var.service_base_name}-subnet"
}
resource "google_compute_network" "ssn_gke_vpc" {
diff --git a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
index 3b0ef82..117f3ee 100644
--- a/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
+++ b/integration-tests/src/main/java/com/epam/dlab/automation/cloud/aws/AmazonHelper.java
@@ -64,7 +64,7 @@
List<String> valuesT1 = new ArrayList<>();
valuesT1.add(instanceName + "*");
- Filter filter = new Filter("tag:" + NamingHelper.getServiceBaseName() + "-Tag", valuesT1);
+ Filter filter = new Filter("tag:" + NamingHelper.getServiceBaseName() + "-tag", valuesT1);
DescribeInstancesRequest describeInstanceRequest = new DescribeInstancesRequest().withFilters(filter);
DescribeInstancesResult describeInstanceResult = ec2.describeInstances(describeInstanceRequest);
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
index 65fb838..16d36be 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/StatusEnvBaseDTO.java
@@ -30,6 +30,7 @@
private String instanceId;
@JsonProperty("exploratory_name")
private String exploratoryName;
+ private String project;
@JsonProperty("exploratory_id")
private String exploratoryId;
@JsonProperty("exploratory_template_name")
@@ -61,6 +62,19 @@
return self;
}
+ public String getProject() {
+ return project;
+ }
+
+ public void setProject(String project) {
+ this.project = project;
+ }
+
+ public T withProject(String project) {
+ setProject(project);
+ return self;
+ }
+
public String getExploratoryId() {
return exploratoryId;
}
diff --git a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
index 0dad8e4..b41f432 100644
--- a/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
+++ b/services/dlab-model/src/main/java/com/epam/dlab/dto/exploratory/ExploratoryImageDTO.java
@@ -35,6 +35,8 @@
private Map<String, String> tags;
@JsonProperty("endpoint_name")
private String endpoint;
+ @JsonProperty("conf_shared_image_enabled")
+ private String sharedImageEnabled;
public ExploratoryImageDTO withImageName(String imageName) {
this.imageName = imageName;
@@ -51,6 +53,11 @@
return this;
}
+ public ExploratoryImageDTO withSharedImageEnabled(String sharedImageEnabled) {
+ this.sharedImageEnabled = sharedImageEnabled;
+ return this;
+ }
+
@Override
public MoreObjects.ToStringHelper toStringHelper(Object self) {
return super.toStringHelper(self)
diff --git a/services/provisioning-service/provisioning.yml b/services/provisioning-service/provisioning.yml
index d234e43..53c303e 100644
--- a/services/provisioning-service/provisioning.yml
+++ b/services/provisioning-service/provisioning.yml
@@ -27,7 +27,7 @@
responseDirectory: /opt/dlab/tmp
handlerDirectory: /opt/dlab/handlers
dockerLogDirectory: ${LOG_ROOT_DIR}
-warmupPollTimeout: 50s
+warmupPollTimeout: 2m
resourceStatusPollTimeout: 300m
keyLoaderPollTimeout: 30m
requestEnvStatusTimeout: 50s
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
index 8a11088..1025ad6 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/ProvisioningServiceApplicationConfiguration.java
@@ -49,7 +49,7 @@
private String handlerDirectory;
@JsonProperty
- private Duration warmupPollTimeout = Duration.minutes(1);
+ private Duration warmupPollTimeout;
@JsonProperty
private Duration resourceStatusPollTimeout = Duration.minutes(3);
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
index 8ccf260..877cc5a 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalCallbackHandler.java
@@ -96,8 +96,9 @@
@Override
protected ComputationalStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
return super.getBaseStatusDTO(status)
- .withExploratoryName(dto.getExploratoryName())
- .withComputationalName(dto.getComputationalName());
+ .withExploratoryName(dto.getExploratoryName())
+ .withComputationalName(dto.getComputationalName())
+ .withProject(dto.getProject());
}
private String instanceId(JsonNode jsonNode) {
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
index c53c86e..8d6e794 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ComputationalConfigureCallbackHandler.java
@@ -56,6 +56,7 @@
return baseStatus
.withExploratoryName(dto.getExploratoryName())
.withComputationalName(dto.getComputationalName())
+ .withProject(dto.getProject())
.withUptime(null)
.withLastActivity(Date.from(Instant.now()));
}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
index 62746e8..047ebf9 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/ExploratoryCallbackHandler.java
@@ -50,14 +50,16 @@
@JsonProperty
private final String exploratoryName;
+ private final String project;
@JsonCreator
public ExploratoryCallbackHandler(@JacksonInject RESTService selfService,
@JsonProperty("action") DockerAction action,
@JsonProperty("uuid") String uuid, @JsonProperty("user") String user,
- @JsonProperty("exploratoryName") String exploratoryName) {
+ String project, @JsonProperty("exploratoryName") String exploratoryName) {
super(selfService, user, uuid, action);
this.exploratoryName = exploratoryName;
+ this.project = project;
}
@Override
@@ -99,6 +101,8 @@
@Override
protected ExploratoryStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
- return super.getBaseStatusDTO(status).withExploratoryName(exploratoryName);
+ return super.getBaseStatusDTO(status)
+ .withExploratoryName(exploratoryName)
+ .withProject(project);
}
}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
index a31fea3..8d46b60 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/core/response/handlers/LibInstallCallbackHandler.java
@@ -111,7 +111,8 @@
protected LibInstallStatusDTO getBaseStatusDTO(UserInstanceStatus status) {
return super.getBaseStatusDTO(status)
.withExploratoryName(dto.getExploratoryName())
- .withUptime(Date.from(Instant.now()))
- .withComputationalName(dto.getComputationalName());
+ .withComputationalName(dto.getComputationalName())
+ .withProject(dto.getProject())
+ .withUptime(Date.from(Instant.now()));
}
}
diff --git a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
index e4d9ecf..b15b342 100644
--- a/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
+++ b/services/provisioning-service/src/main/java/com/epam/dlab/backendapi/resources/base/ExploratoryService.java
@@ -70,7 +70,7 @@
private FileHandlerCallback getFileHandlerCallback(DockerAction action, String uuid, ExploratoryBaseDTO<?> dto) {
return new ExploratoryCallbackHandler(selfService, action, uuid, dto.getCloudSettings().getIamUser(),
- dto.getExploratoryName());
+ dto.getProject(), dto.getExploratoryName());
}
private String nameContainer(String user, DockerAction action, String name) {
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/dataengine-service_configure.json
@@ -3,7 +3,7 @@
"response": {
"result": {
"Action": "Configure notebook server",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
index fe2bf0a..81afe8a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/edge_create.json
@@ -6,7 +6,7 @@
"tunnel_port": "22",
"full_edge_conf": {
"notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
- "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
"allocation_id": "eipalloc-2801084f",
"key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_start.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Start up notebook server",
"ip": "172.31.48.131",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"hostname": "ip-172-31-48-131.us-west-2.compute.internal",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_stop.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Stop notebook server",
"user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/aws/notebook_terminate.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Terminate notebook server",
"user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/dataengine-service_configure.json
@@ -3,7 +3,7 @@
"response": {
"result": {
"Action": "Configure notebook server",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
index cda1c9e..b2a9931 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/edge_create.json
@@ -6,7 +6,7 @@
"tunnel_port": "22",
"full_edge_conf": {
"notebook_role_profile_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-nb-Profile",
- "tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"edge_security_group_name": "${CONF_SERVICE_BASE_NAME}-${EDGE_USER_NAME}-edge-SG",
"allocation_id": "eipalloc-2801084f",
"key_name": "${CONF_KEY_NAME}",
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
index 1933ea3..525bd9a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_start.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Start up notebook server",
"ip": "172.31.48.131",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"hostname": "ip-172-31-48-131.us-west-2.compute.internal",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
index 4e1b9f0..e0ee8b1 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_stop.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Stop notebook server",
"user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
index 74c82ad7..a9e2a3a 100644
--- a/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
+++ b/services/provisioning-service/src/main/resources/mock_response/azure/notebook_terminate.json
@@ -4,7 +4,7 @@
"result": {
"Action": "Terminate notebook server",
"user_own_bucket_name": "${CONF_SERVICE_BASE_NAME}-ssn-bucket",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/notebook/notebook_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
index e6661b8..b8be3bf 100644
--- a/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
+++ b/services/provisioning-service/src/main/resources/mock_response/gcp/dataengine-service_configure.json
@@ -3,7 +3,7 @@
"response": {
"result": {
"Action": "Configure notebook server",
- "Tag_name": "${CONF_SERVICE_BASE_NAME}-Tag",
+ "Tag_name": "${CONF_SERVICE_BASE_NAME}-tag",
"notebook_name": "${NOTEBOOK_INSTANCE_NAME}"
},
"log": "/var/log/dlab/emr/emr_${EDGE_USER_NAME}_${REQUEST_ID}.log"
diff --git a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
index bf92609..a065248 100644
--- a/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
+++ b/services/provisioning-service/src/test/java/com/epam/dlab/backendapi/core/commands/CommandExecutorMockTest.java
@@ -70,7 +70,7 @@
RESTServiceMock selfService = new RESTServiceMock();
ExploratoryCallbackHandler handler = new ExploratoryCallbackHandler(selfService, action,
- getRequestId(exec), getEdgeUserName(exec), getExploratoryName(exec));
+ getRequestId(exec), getEdgeUserName(exec), "", getExploratoryName(exec));
handler.handle(exec.getResponseFileName(), Files.readAllBytes(Paths.get(exec.getResponseFileName())));
try {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
index 311158a..683f8fc 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ComputationalDAO.java
@@ -21,7 +21,11 @@
import com.epam.dlab.backendapi.util.DateRemoverUtil;
-import com.epam.dlab.dto.*;
+import com.epam.dlab.dto.ResourceURL;
+import com.epam.dlab.dto.SchedulerJobDTO;
+import com.epam.dlab.dto.StatusEnvBaseDTO;
+import com.epam.dlab.dto.UserInstanceDTO;
+import com.epam.dlab.dto.UserInstanceStatus;
import com.epam.dlab.dto.aws.computational.ClusterConfig;
import com.epam.dlab.dto.base.DataEngineType;
import com.epam.dlab.dto.computational.ComputationalStatusDTO;
@@ -36,15 +40,30 @@
import java.time.LocalDateTime;
import java.time.ZoneId;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.UPTIME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
import static com.epam.dlab.backendapi.dao.SchedulerJobDAO.SCHEDULER_DATA;
import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
-import static com.mongodb.client.model.Filters.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.not;
import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
import static com.mongodb.client.model.Updates.push;
import static com.mongodb.client.model.Updates.set;
import static java.util.stream.Collectors.toList;
@@ -70,8 +89,8 @@
return COMPUTATIONAL_RESOURCES + FIELD_SET_DELIMETER + fieldName;
}
- private static Bson computationalCondition(String user, String exploratoryName, String compName) {
- return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName),
+ private static Bson computationalCondition(String user, String project, String exploratoryName, String compName) {
+ return and(eq(USER, user), eq(PROJECT, project), eq(EXPLORATORY_NAME, exploratoryName),
eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, compName));
}
@@ -80,12 +99,14 @@
*
* @param user user name.
* @param exploratoryName name of exploratory.
+ * @param project name of project
* @param computationalDTO object of computational resource.
* @return <b>true</b> if operation was successful, otherwise <b>false</b>.
*/
- public boolean addComputational(String user, String exploratoryName, UserComputationalResource computationalDTO) {
+ public boolean addComputational(String user, String exploratoryName, String project,
+ UserComputationalResource computationalDTO) {
final UpdateResult updateResult = updateOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
not(elemMatch(COMPUTATIONAL_RESOURCES,
eq(COMPUTATIONAL_NAME, computationalDTO.getComputationalName())))),
push(COMPUTATIONAL_RESOURCES, convertToBson(computationalDTO)));
@@ -96,14 +117,15 @@
* Finds and returns the of computational resource.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName the name of exploratory.
* @param computationalName name of computational resource.
* @throws DlabException if exception occurs
*/
- public UserComputationalResource fetchComputationalFields(String user, String exploratoryName,
+ public UserComputationalResource fetchComputationalFields(String user, String project, String exploratoryName,
String computationalName) {
Optional<UserInstanceDTO> opt = findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
Filters.elemMatch(COMPUTATIONAL_RESOURCES, eq(COMPUTATIONAL_NAME, computationalName))),
fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId()),
UserInstanceDTO.class);
@@ -114,10 +136,10 @@
"exploratory name " + exploratoryName + " not found."));
}
- public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String exploratoryName,
+ public List<UserComputationalResource> findComputationalResourcesWithStatus(String user, String project, String exploratoryName,
UserInstanceStatus status) {
final UserInstanceDTO userInstanceDTO = findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
elemMatch(COMPUTATIONAL_RESOURCES, eq(STATUS, status.toString()))),
fields(include(COMPUTATIONAL_RESOURCES), excludeId()),
UserInstanceDTO.class)
@@ -139,7 +161,7 @@
try {
Document values = new Document(computationalFieldFilter(STATUS), dto.getStatus());
return updateOne(USER_INSTANCES,
- and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
elemMatch(COMPUTATIONAL_RESOURCES,
and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
not(eq(STATUS, TERMINATED.toString()))))),
@@ -162,7 +184,7 @@
UpdateResult result;
do {
result = updateOne(USER_INSTANCES,
- and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
elemMatch(COMPUTATIONAL_RESOURCES,
and(not(eq(STATUS, TERMINATED.toString())),
not(eq(STATUS, dto.getStatus()))))),
@@ -174,80 +196,51 @@
return count;
}
- public void updateComputationalStatusesForExploratory(String user, String exploratoryName,
+ public void updateComputationalStatusesForExploratory(String user, String project, String exploratoryName,
UserInstanceStatus dataengineStatus,
UserInstanceStatus dataengineServiceStatus,
UserInstanceStatus... excludedStatuses) {
- updateComputationalResource(user, exploratoryName, dataengineStatus, DataEngineType.SPARK_STANDALONE,
- excludedStatuses);
- updateComputationalResource(user, exploratoryName, dataengineServiceStatus, DataEngineType.CLOUD_SERVICE,
- excludedStatuses);
-
- }
-
- /**
- * Updates status for all corresponding computational resources in Mongo database.
- *
- * @param newStatus new status for computational resources.
- * @param user user name.
- * @param exploratoryStatuses exploratory's status list.
- * @param computationalTypes type list of computational resource (may contain 'dataengine' and/or
- * 'dataengine-service').
- * @param oldComputationalStatuses old statuses of computational resources.
- */
-
- public void updateStatusForComputationalResources(UserInstanceStatus newStatus, String user,
- List<UserInstanceStatus> exploratoryStatuses,
- List<DataEngineType> computationalTypes,
- UserInstanceStatus... oldComputationalStatuses) {
-
- List<String> exploratoryNames = stream(find(USER_INSTANCES,
- and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
- fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
- .collect(toList());
-
- exploratoryNames.forEach(explName ->
- getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, oldComputationalStatuses)
- .forEach(compName -> updateComputationalField(user, explName, compName,
- STATUS, newStatus.toString()))
- );
+ updateComputationalResource(user, project, exploratoryName, dataengineStatus,
+ DataEngineType.SPARK_STANDALONE, excludedStatuses);
+ updateComputationalResource(user, project, exploratoryName, dataengineServiceStatus,
+ DataEngineType.CLOUD_SERVICE, excludedStatuses);
}
/**
* Updates the status for single computational resource in Mongo database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName exploratory's name.
* @param computationalName name of computational resource.
* @param newStatus new status of computational resource.
*/
- public void updateStatusForComputationalResource(String user, String exploratoryName,
- String computationalName,
- UserInstanceStatus newStatus) {
- updateComputationalField(user, exploratoryName, computationalName, STATUS, newStatus.toString());
+ public void updateStatusForComputationalResource(String user, String project, String exploratoryName,
+ String computationalName, UserInstanceStatus newStatus) {
+ updateComputationalField(user, project, exploratoryName, computationalName, STATUS, newStatus.toString());
}
- private void updateComputationalResource(String user, String exploratoryName,
+ private void updateComputationalResource(String user, String project, String exploratoryName,
UserInstanceStatus dataengineServiceStatus, DataEngineType cloudService,
UserInstanceStatus... excludedStatuses) {
UpdateResult result;
do {
result = updateMany(USER_INSTANCES,
- computationalFilter(user, exploratoryName, dataengineServiceStatus.toString(),
- DataEngineType.getDockerImageName(cloudService), excludedStatuses),
+ computationalFilter(user, project, exploratoryName,
+ dataengineServiceStatus.toString(), DataEngineType.getDockerImageName(cloudService), excludedStatuses),
new Document(SET,
new Document(computationalFieldFilter(STATUS), dataengineServiceStatus.toString())));
} while (result.getModifiedCount() > 0);
}
- private Bson computationalFilter(String user, String exploratoryName, String computationalStatus, String
- computationalImage, UserInstanceStatus[] excludedStatuses) {
+ private Bson computationalFilter(String user, String project, String exploratoryName, String computationalStatus,
+ String computationalImage, UserInstanceStatus[] excludedStatuses) {
final String[] statuses = Arrays.stream(excludedStatuses)
.map(UserInstanceStatus::toString)
.toArray(String[]::new);
- return and(exploratoryCondition(user, exploratoryName),
+ return and(exploratoryCondition(user, exploratoryName, project),
elemMatch(COMPUTATIONAL_RESOURCES, and(eq(IMAGE, computationalImage),
not(in(STATUS, statuses)),
not(eq(STATUS, computationalStatus)))));
@@ -286,7 +279,7 @@
values.append(computationalFieldFilter(CONFIG),
dto.getConfig().stream().map(this::convertToBson).collect(toList()));
}
- return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ return updateOne(USER_INSTANCES, and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
elemMatch(COMPUTATIONAL_RESOURCES,
and(eq(COMPUTATIONAL_NAME, dto.getComputationalName()),
not(eq(STATUS, TERMINATED.toString()))))),
@@ -309,49 +302,19 @@
return map;
}
-
- /**
- * Updates the requirement for reuploading key for all corresponding computational resources in Mongo database.
- *
- * @param user user name.
- * @param exploratoryStatuses exploratory's status list.
- * @param computationalTypes type list of computational resource (may contain 'dataengine' and/or
- * 'dataengine-service').
- * @param reuploadKeyRequired true/false.
- * @param computationalStatuses statuses of computational resource.
- */
-
- public void updateReuploadKeyFlagForComputationalResources(String user,
- List<UserInstanceStatus> exploratoryStatuses,
- List<DataEngineType> computationalTypes,
- boolean reuploadKeyRequired,
- UserInstanceStatus... computationalStatuses) {
-
- List<String> exploratoryNames = stream(find(USER_INSTANCES,
- and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses))),
- fields(include(EXPLORATORY_NAME)))).map(d -> d.getString(EXPLORATORY_NAME))
- .collect(toList());
-
- exploratoryNames.forEach(explName ->
- getComputationalResourcesWhereStatusIn(user, computationalTypes, explName, computationalStatuses)
- .forEach(compName -> updateComputationalField(user, explName, compName,
- REUPLOAD_KEY_REQUIRED, reuploadKeyRequired))
- );
- }
-
/**
* Updates the requirement for reuploading key for single computational resource in Mongo database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName exploratory's name.
* @param computationalName name of computational resource.
* @param reuploadKeyRequired true/false.
*/
- public void updateReuploadKeyFlagForComputationalResource(String user, String exploratoryName,
- String computationalName, boolean
- reuploadKeyRequired) {
- updateComputationalField(user, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
+ public void updateReuploadKeyFlagForComputationalResource(String user, String project, String exploratoryName,
+ String computationalName, boolean reuploadKeyRequired) {
+ updateComputationalField(user, project, exploratoryName, computationalName, REUPLOAD_KEY_REQUIRED, reuploadKeyRequired);
}
/**
@@ -359,6 +322,7 @@
* have predefined type.
*
* @param user user name.
+ * @param project project name
* @param computationalTypes type list of computational resource which may contain 'dataengine' and/or
* 'dataengine-service'.
* @param exploratoryName name of exploratory.
@@ -367,10 +331,11 @@
*/
@SuppressWarnings("unchecked")
- public List<String> getComputationalResourcesWhereStatusIn(String user, List<DataEngineType> computationalTypes,
+ public List<String> getComputationalResourcesWhereStatusIn(String user, String project,
+ List<DataEngineType> computationalTypes,
String exploratoryName,
UserInstanceStatus... computationalStatuses) {
- return stream((List<Document>) find(USER_INSTANCES, and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName)),
+ return stream((List<Document>) find(USER_INSTANCES, exploratoryCondition(user, exploratoryName, project),
fields(include(COMPUTATIONAL_RESOURCES))).first().get(COMPUTATIONAL_RESOURCES))
.filter(doc ->
statusList(computationalStatuses).contains(doc.getString(STATUS)) &&
@@ -379,9 +344,9 @@
}
@SuppressWarnings("unchecked")
- public List<ClusterConfig> getClusterConfig(String user, String exploratoryName, String computationalName) {
+ public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName, String computationalName) {
return findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
Filters.elemMatch(COMPUTATIONAL_RESOURCES, and(eq(COMPUTATIONAL_NAME, computationalName),
notNull(CONFIG)))),
fields(include(COMPUTATIONAL_RESOURCES + ".$"), excludeId())
@@ -396,41 +361,42 @@
* Updates computational resource's field.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @param computationalName name of computational resource.
* @param fieldName computational field's name for updating.
* @param fieldValue computational field's value for updating.
*/
- private <T> UpdateResult updateComputationalField(String user, String exploratoryName, String computationalName,
+ private <T> UpdateResult updateComputationalField(String user, String project, String exploratoryName, String computationalName,
String fieldName, T fieldValue) {
return updateOne(USER_INSTANCES,
- computationalCondition(user, exploratoryName, computationalName),
+ computationalCondition(user, project, exploratoryName, computationalName),
set(computationalFieldFilter(fieldName), fieldValue));
}
- public void updateSchedulerSyncFlag(String user, String exploratoryName, boolean syncFlag) {
+ public void updateSchedulerSyncFlag(String user, String project, String exploratoryName, boolean syncFlag) {
final String syncStartField = SCHEDULER_DATA + ".sync_start_required";
UpdateResult result;
do {
- result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+ result = updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
elemMatch(COMPUTATIONAL_RESOURCES, and(ne(SCHEDULER_DATA, null), ne(syncStartField, syncFlag)))),
set(computationalFieldFilter(syncStartField), syncFlag));
} while (result.getModifiedCount() != 0);
}
- public UpdateResult updateSchedulerDataForComputationalResource(String user, String exploratoryName,
+ public UpdateResult updateSchedulerDataForComputationalResource(String user, String project, String exploratoryName,
String computationalName, SchedulerJobDTO dto) {
- return updateComputationalField(user, exploratoryName, computationalName, SCHEDULER_DATA,
- Objects.isNull(dto) ? null : convertToBson(dto));
+ return updateComputationalField(user, project, exploratoryName, computationalName,
+ SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto));
}
- public void updateLastActivity(String user, String exploratoryName,
+ public void updateLastActivity(String user, String project, String exploratoryName,
String computationalName, LocalDateTime lastActivity) {
updateOne(USER_INSTANCES,
- computationalCondition(user, exploratoryName, computationalName),
+ computationalCondition(user, project, exploratoryName, computationalName),
set(computationalFieldFilter(COMPUTATIONAL_LAST_ACTIVITY),
Date.from(lastActivity.atZone(ZoneId.systemDefault()).toInstant())));
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
index 0e85908..ebacc51 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/EnvDAO.java
@@ -40,18 +40,39 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_EDGE;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
-import static com.mongodb.client.model.Filters.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Filters.or;
import static com.mongodb.client.model.Projections.elemMatch;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
import static java.util.Objects.nonNull;
/**
@@ -159,17 +180,18 @@
/**
* Updates the status of exploratory and computational for user.
*
- * @param user the name of user.
- * @param list the status of node.
+ * @param user the name of user.
+ * @param project name of project
+ * @param list the status of node.
*/
- public void updateEnvStatus(String user, EnvResourceList list) {
+ public void updateEnvStatus(String user, String project, EnvResourceList list) {
if (list != null && notEmpty(list.getHostList())) {
updateEdgeStatus(user, list.getHostList());
if (!list.getHostList().isEmpty()) {
stream(find(USER_INSTANCES, eq(USER, user),
fields(INCLUDE_EXP_UPDATE_FIELDS, excludeId())))
.filter(this::instanceIdPresent)
- .forEach(exp -> updateUserResourceStatuses(user, list, exp));
+ .forEach(exp -> updateUserResourceStatuses(user, project, list, exp));
}
}
}
@@ -195,26 +217,25 @@
}
@SuppressWarnings("unchecked")
- private void updateUserResourceStatuses(String user, EnvResourceList list, Document exp) {
+ private void updateUserResourceStatuses(String user, String project, EnvResourceList list, Document exp) {
final String exploratoryName = exp.getString(EXPLORATORY_NAME);
getEnvResourceAndRemove(list.getHostList(), exp.getString(INSTANCE_ID))
- .ifPresent(resource -> updateExploratoryStatus(user, exploratoryName, exp.getString(STATUS),
- resource.getStatus()));
+ .ifPresent(resource -> updateExploratoryStatus(user, project, exploratoryName,
+ exp.getString(STATUS), resource.getStatus()));
(getComputationalResources(exp))
.stream()
.filter(this::instanceIdPresent)
- .forEach(comp -> updateComputational(user, list, exploratoryName, comp));
+ .forEach(comp -> updateComputational(user, project, list, exploratoryName, comp));
}
- private void updateComputational(String user, EnvResourceList list, String exploratoryName, Document comp) {
+ private void updateComputational(String user, String project, EnvResourceList list, String exploratoryName, Document comp) {
final List<EnvResource> listToCheck = DataEngineType.CLOUD_SERVICE ==
DataEngineType.fromDockerImageName(comp.getString(IMAGE)) ?
list.getClusterList() : list.getHostList();
getEnvResourceAndRemove(listToCheck, comp.getString(INSTANCE_ID))
- .ifPresent(resource -> updateComputationalStatus(user, exploratoryName,
- comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource
- .getStatus()));
+ .ifPresent(resource -> updateComputationalStatus(user, project, exploratoryName,
+ comp.getString(ComputationalDAO.COMPUTATIONAL_NAME), comp.getString(STATUS), resource.getStatus()));
}
private boolean instanceIdPresent(Document d) {
@@ -339,11 +360,12 @@
* Update the status of exploratory if it needed.
*
* @param user the user name
+ * @param project project name
* @param exploratoryName the name of exploratory
* @param oldStatus old status
* @param newStatus new status
*/
- private void updateExploratoryStatus(String user, String exploratoryName,
+ private void updateExploratoryStatus(String user, String project, String exploratoryName,
String oldStatus, String newStatus) {
LOGGER.trace("Update exploratory status for user {} with exploratory {} from {} to {}", user, exploratoryName,
oldStatus, newStatus);
@@ -356,7 +378,7 @@
LOGGER.debug("Exploratory status for user {} with exploratory {} will be updated from {} to {}", user,
exploratoryName, oldStatus, status);
updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
Updates.set(STATUS, status.toString()));
}
}
@@ -399,12 +421,13 @@
* Update the status of exploratory if it needed.
*
* @param user the user name.
+ * @param project project name
* @param exploratoryName the name of exploratory.
* @param computationalName the name of computational.
* @param oldStatus old status.
* @param newStatus new status.
*/
- private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+ private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
String oldStatus, String newStatus) {
LOGGER.trace("Update computational status for user {} with exploratory {} and computational {} from {} to {}",
user, exploratoryName, computationalName, oldStatus, newStatus);
@@ -420,12 +443,12 @@
"from {} to {}",
user, exploratoryName, computationalName, oldStatus, status);
if (status == UserInstanceStatus.TERMINATED &&
- terminateComputationalSpot(user, exploratoryName, computationalName)) {
+ terminateComputationalSpot(user, project, exploratoryName, computationalName)) {
return;
}
Document values = new Document(COMPUTATIONAL_STATUS_FILTER, status.toString());
updateOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
elemMatch(COMPUTATIONAL_RESOURCES,
and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName))
)
@@ -438,15 +461,16 @@
* Terminate EMR if it is spot.
*
* @param user the user name.
+ * @param project name of project
* @param exploratoryName the name of exploratory.
* @param computationalName the name of computational.
* @return <b>true</b> if computational is spot and should be terminated by docker, otherwise <b>false</b>.
*/
- private boolean terminateComputationalSpot(String user, String exploratoryName, String computationalName) {
+ private boolean terminateComputationalSpot(String user, String project, String exploratoryName, String computationalName) {
LOGGER.trace("Check computatation is spot for user {} with exploratory {} and computational {}", user,
exploratoryName, computationalName);
Document doc = findOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
and(elemMatch(COMPUTATIONAL_RESOURCES,
and(eq(ComputationalDAO.COMPUTATIONAL_NAME, computationalName),
eq(COMPUTATIONAL_SPOT, true),
@@ -471,7 +495,7 @@
ComputationalResourceAws computational = new ComputationalResourceAws();
SelfServiceApplication.getInjector().injectMembers(computational);
UserInfo ui = new UserInfo(user, accessToken);
- computational.terminate(ui, exploratoryName, computationalName);
+ computational.terminate(ui, project, exploratoryName, computationalName);
} catch (Exception e) {
// Cannot terminate EMR, just update status to terminated
LOGGER.warn("Can't terminate computational for user {} with exploratory {} and computational {}. {}",
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
index cd0a64f..1adade7 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryDAO.java
@@ -61,7 +61,6 @@
import static com.mongodb.client.model.Projections.include;
import static com.mongodb.client.model.Updates.set;
import static java.util.stream.Collectors.toList;
-import static org.apache.commons.lang3.StringUtils.EMPTY;
/**
* DAO for user exploratory.
@@ -91,22 +90,18 @@
log.info("{} is initialized", getClass().getSimpleName());
}
- static Bson exploratoryCondition(String user, String exploratoryName) {
- return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName));
+ static Bson exploratoryCondition(String user, String exploratoryName, String project) {
+ return and(eq(USER, user), eq(EXPLORATORY_NAME, exploratoryName), eq(PROJECT, project));
}
- private Bson exploratoryStatusCondition(String user, UserInstanceStatus... exploratoryStatuses) {
- return and(eq(USER, user), in(STATUS, statusList(exploratoryStatuses)));
- }
-
- private static Bson runningExploratoryCondition(String user, String exploratoryName) {
- return and(eq(USER, user),
+ private static Bson runningExploratoryCondition(String user, String exploratoryName, String project) {
+ return and(eq(USER, user), eq(PROJECT, project),
and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString())));
}
- static Bson runningExploratoryAndComputationalCondition(String user, String exploratoryName, String
- computationalName) {
- return and(eq(USER, user),
+ static Bson runningExploratoryAndComputationalCondition(String user, String project, String exploratoryName,
+ String computationalName) {
+ return and(eq(USER, user), eq(PROJECT, project),
and(eq(EXPLORATORY_NAME, exploratoryName), eq(STATUS, UserInstanceStatus.RUNNING.toString()),
eq(COMPUTATIONAL_RESOURCES + "." + COMPUTATIONAL_NAME, computationalName),
eq(COMPUTATIONAL_RESOURCES + "." + STATUS, UserInstanceStatus.RUNNING.toString())));
@@ -125,20 +120,6 @@
}
/**
- * Finds and returns the unique id for exploratory.
- *
- * @param user user name.
- * @param exploratoryName the name of exploratory.
- */
- public String fetchExploratoryId(String user, String exploratoryName) {
- return findOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
- fields(include(EXPLORATORY_ID), excludeId()))
- .orElse(new Document())
- .getOrDefault(EXPLORATORY_ID, EMPTY).toString();
- }
-
- /**
* Finds and returns the info of all user's running notebooks.
*
* @param user user name.
@@ -151,26 +132,16 @@
return getUserInstances(and(eq(PROJECT, project), eq(STATUS, UserInstanceStatus.RUNNING.toString())), false);
}
+ public List<UserInstanceDTO> fetchRunningExploratoryFieldsForProject(String project, List<String> endpoints) {
+ return getUserInstances(and(eq(PROJECT, project), eq(STATUS, UserInstanceStatus.RUNNING.toString()), in(ENDPOINT, endpoints)), false);
+ }
+
public List<UserInstanceDTO> fetchExploratoryFieldsForProject(String project) {
return getUserInstances(and(eq(PROJECT, project)), false);
}
- /**
- * Finds and returns the info of all user's notebooks whose status is present among predefined ones.
- *
- * @param user user name.
- * @param computationalFieldsRequired true/false.
- * @param statuses array of statuses.
- */
- public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusIn(String user, boolean computationalFieldsRequired,
- UserInstanceStatus... statuses) {
- final List<String> statusList = statusList(statuses);
- return getUserInstances(
- and(
- eq(USER, user),
- in(STATUS, statusList)
- ),
- computationalFieldsRequired);
+ public List<UserInstanceDTO> fetchExploratoryFieldsForProjectWithComp(String project) {
+ return getUserInstances(and(eq(PROJECT, project)), true);
}
/**
@@ -224,22 +195,6 @@
false);
}
- /**
- * Finds and returns the info of all user's notebooks whose status is absent among predefined ones.
- *
- * @param user user name.
- * @param statuses array of statuses.
- */
- public List<UserInstanceDTO> fetchUserExploratoriesWhereStatusNotIn(String user, UserInstanceStatus... statuses) {
- final List<String> statusList = statusList(statuses);
- return getUserInstances(
- and(
- eq(USER, user),
- not(in(STATUS, statusList))
- ),
- false);
- }
-
public List<UserInstanceDTO> fetchProjectExploratoriesWhereStatusNotIn(String project, String endpoint,
UserInstanceStatus... statuses) {
final List<String> statusList = statusList(statuses);
@@ -295,35 +250,25 @@
* Finds and returns the info of exploratory (without info about computational resources).
*
* @param user user name.
+ * @param project project name
* @param exploratoryName the name of exploratory.
*/
- public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName) {
- return getExploratory(user, exploratoryName, false).orElseThrow(() ->
+ public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName) {
+ return getExploratory(user, project, exploratoryName, false).orElseThrow(() ->
new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
}
- public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName,
- boolean includeComputationalResources) {
- return getExploratory(user, exploratoryName, includeComputationalResources).orElseThrow(() ->
+ public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName, boolean includeCompResources) {
+ return getExploratory(user, project, exploratoryName, includeCompResources).orElseThrow(() ->
new ResourceNotFoundException(String.format(EXPLORATORY_NOT_FOUND_MSG, user, exploratoryName)));
}
- /**
- * Checks if exploratory exists.
- *
- * @param user user name.
- * @param exploratoryName the name of exploratory.
- */
- public boolean isExploratoryExist(String user, String exploratoryName) {
- return getExploratory(user, exploratoryName, false).isPresent();
- }
-
- private Optional<UserInstanceDTO> getExploratory(String user, String exploratoryName,
+ private Optional<UserInstanceDTO> getExploratory(String user, String project, String exploratoryName,
boolean includeCompResources) {
return findOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
includeCompResources ? null : fields(exclude(COMPUTATIONAL_RESOURCES)),
UserInstanceDTO.class);
}
@@ -332,12 +277,13 @@
* Finds and returns the info of running exploratory with running cluster.
*
* @param user user name.
+ * @param project name of project
* @param exploratoryName name of exploratory.
* @param computationalName name of cluster
*/
- public UserInstanceDTO fetchExploratoryFields(String user, String exploratoryName, String computationalName) {
+ public UserInstanceDTO fetchExploratoryFields(String user, String project, String exploratoryName, String computationalName) {
return findOne(USER_INSTANCES,
- runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+ runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
UserInstanceDTO.class)
.orElseThrow(() -> new DlabException(String.format("Running notebook %s with running cluster %s not " +
"found for user %s",
@@ -348,10 +294,11 @@
* Finds and returns the info of running exploratory.
*
* @param user user name.
+ * @param project project
* @param exploratoryName name of exploratory.
*/
- public UserInstanceDTO fetchRunningExploratoryFields(String user, String exploratoryName) {
- return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName),
+ public UserInstanceDTO fetchRunningExploratoryFields(String user, String project, String exploratoryName) {
+ return findOne(USER_INSTANCES, runningExploratoryCondition(user, exploratoryName, project),
fields(exclude(COMPUTATIONAL_RESOURCES)), UserInstanceDTO.class)
.orElseThrow(() -> new DlabException(
String.format("Running exploratory instance for user %s with name %s not found.",
@@ -375,34 +322,22 @@
*/
public UpdateResult updateExploratoryStatus(StatusEnvBaseDTO<?> dto) {
return updateOne(USER_INSTANCES,
- exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
set(STATUS, dto.getStatus()));
}
/**
- * Updates the status for all user's corresponding exploratories in Mongo database.
- *
- * @param newExploratoryStatus new status for exploratories.
- * @param user user name.
- * @param oldExploratoryStatuses old statuses of exploratories.
- */
- public void updateStatusForExploratories(UserInstanceStatus newExploratoryStatus, String user,
- UserInstanceStatus... oldExploratoryStatuses) {
- updateMany(USER_INSTANCES, exploratoryStatusCondition(user, oldExploratoryStatuses),
- set(STATUS, newExploratoryStatus.toString()));
- }
-
- /**
* Updates status for single exploratory in Mongo database.
*
* @param user user.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @param newStatus new status of exploratory.
* @return The result of an update operation.
*/
- public UpdateResult updateStatusForExploratory(String user, String exploratoryName, UserInstanceStatus newStatus) {
+ public UpdateResult updateStatusForExploratory(String user, String project, String exploratoryName, UserInstanceStatus newStatus) {
return updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
set(STATUS, newStatus.toString()));
}
@@ -410,40 +345,29 @@
* Updates the scheduler's data for exploratory in Mongo database.
*
* @param user user.
+ * @param project name of project
* @param exploratoryName name of exploratory.
* @param dto object of scheduler data.
* @return The result of an update operation.
*/
- public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String exploratoryName,
+ public UpdateResult updateSchedulerDataForUserAndExploratory(String user, String project, String exploratoryName,
SchedulerJobDTO dto) {
return updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
set(SCHEDULER_DATA, Objects.isNull(dto) ? null : convertToBson(dto)));
}
/**
- * Updates the requirement for reuploading key for all user's corresponding exploratories in Mongo database.
- *
- * @param user user name.
- * @param reuploadKeyRequired true/false.
- * @param exploratoryStatuses statuses of exploratory.
- */
- public void updateReuploadKeyForExploratories(String user, boolean reuploadKeyRequired,
- UserInstanceStatus... exploratoryStatuses) {
- updateMany(USER_INSTANCES, exploratoryStatusCondition(user, exploratoryStatuses),
- set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
- }
-
- /**
* Updates the requirement for reuploading key for single exploratory in Mongo database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName exploratory's name
* @param reuploadKeyRequired true/false.
*/
- public void updateReuploadKeyForExploratory(String user, String exploratoryName, boolean reuploadKeyRequired) {
+ public void updateReuploadKeyForExploratory(String user, String project, String exploratoryName, boolean reuploadKeyRequired) {
updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
set(REUPLOAD_KEY_REQUIRED, reuploadKeyRequired));
}
@@ -481,7 +405,7 @@
}
).collect(Collectors.toList()));
} else if (dto.getPrivateIp() != null) {
- UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getExploratoryName());
+ UserInstanceDTO inst = fetchExploratoryFields(dto.getUser(), dto.getProject(), dto.getExploratoryName());
if (!inst.getPrivateIp().equals(dto.getPrivateIp()) && inst.getResourceUrl() != null) {
values.append(EXPLORATORY_URL, inst.getResourceUrl().stream()
.map(url -> replaceIp(dto.getPrivateIp(), inst, url))
@@ -502,13 +426,13 @@
values.append(CLUSTER_CONFIG, dto.getConfig().stream().map(this::convertToBson).collect(toList()));
}
return updateOne(USER_INSTANCES,
- exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
new Document(SET, values));
}
- public void updateExploratoryIp(String user, String ip, String exploratoryName) {
+ public void updateExploratoryIp(String user, String project, String ip, String exploratoryName) {
- UserInstanceDTO inst = fetchExploratoryFields(user, exploratoryName);
+ UserInstanceDTO inst = fetchExploratoryFields(user, project, exploratoryName);
if (!inst.getPrivateIp().equals(ip)) {
Document values = new Document();
values.append(EXPLORATORY_PRIVATE_IP, ip);
@@ -519,15 +443,15 @@
}
updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
new Document(SET, values));
}
}
@SuppressWarnings("unchecked")
- public List<ClusterConfig> getClusterConfig(String user, String exploratoryName) {
- return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName), notNull(CLUSTER_CONFIG)),
+ public List<ClusterConfig> getClusterConfig(String user, String project, String exploratoryName) {
+ return findOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project), notNull(CLUSTER_CONFIG)),
fields(include(CLUSTER_CONFIG), excludeId()))
.map(d -> convertFromDocument((List<Document>) d.get(CLUSTER_CONFIG),
new TypeReference<List<ClusterConfig>>() {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
index ebdd028..bcec258 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/ExploratoryLibDAO.java
@@ -38,11 +38,16 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.runningExploratoryAndComputationalCondition;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
import static com.mongodb.client.model.Filters.and;
import static com.mongodb.client.model.Filters.eq;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Projections.elemMatch;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
import static com.mongodb.client.model.Updates.push;
/**
@@ -98,17 +103,17 @@
return COMPUTATIONAL_LIBS + "." + computational + FIELD_SET_DELIMETER + fieldName;
}
- private Document findLibraries(String user, String exploratoryName, Bson include) {
+ private Document findLibraries(String user, String project, String exploratoryName, Bson include) {
Optional<Document> opt = findOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
fields(excludeId(), include));
return opt.orElseGet(Document::new);
}
- public List<Library> getLibraries(String user, String exploratoryName) {
- final Document libsDocument = findAllLibraries(user, exploratoryName);
+ public List<Library> getLibraries(String user, String project, String exploratoryName) {
+ final Document libsDocument = findAllLibraries(user, project, exploratoryName);
return Stream
.concat(
libraryStream(libsDocument, exploratoryName, EXPLORATORY_LIBS, ResourceType.EXPLORATORY),
@@ -116,24 +121,23 @@
.collect(Collectors.toList());
}
- public Document findAllLibraries(String user, String exploratoryName) {
- return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
+ public Document findAllLibraries(String user, String project, String exploratoryName) {
+ return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS, COMPUTATIONAL_LIBS,
COMPUTATIONAL_RESOURCES));
}
- public Document findExploratoryLibraries(String user, String exploratoryName) {
- return findLibraries(user, exploratoryName, include(EXPLORATORY_LIBS));
+ public Document findExploratoryLibraries(String user, String project, String exploratoryName) {
+ return findLibraries(user, project, exploratoryName, include(EXPLORATORY_LIBS));
}
- public Document findComputationalLibraries(String user, String exploratoryName, String computationalName) {
- return findLibraries(user, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
+ public Document findComputationalLibraries(String user, String project, String exploratoryName, String computationalName) {
+ return findLibraries(user, project, exploratoryName, include(COMPUTATIONAL_LIBS + "." + computationalName));
}
@SuppressWarnings("unchecked")
- public Library getLibrary(String user, String exploratoryName,
- String libraryGroup, String libraryName) {
+ public Library getLibrary(String user, String project, String exploratoryName, String libraryGroup, String libraryName) {
Optional<Document> userInstance = findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
elemMatch(EXPLORATORY_LIBS,
and(eq(LIB_GROUP, libraryGroup), eq(LIB_NAME, libraryName))
)),
@@ -153,10 +157,10 @@
}
@SuppressWarnings("unchecked")
- public Library getLibrary(String user, String exploratoryName, String computationalName,
+ public Library getLibrary(String user, String project, String exploratoryName, String computationalName,
String libraryGroup, String libraryName) {
Optional<Document> libraryStatus = findOne(USER_INSTANCES,
- and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+ and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
libraryConditionComputational(computationalName, libraryGroup, libraryName)
),
@@ -184,18 +188,19 @@
* Add the user's library for exploratory into database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @param library library.
* @return <b>true</b> if operation was successful, otherwise <b>false</b>.
*/
- public boolean addLibrary(String user, String exploratoryName, LibInstallDTO library, boolean reinstall) {
+ public boolean addLibrary(String user, String project, String exploratoryName, LibInstallDTO library, boolean reinstall) {
Optional<Document> opt = findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName),
+ and(exploratoryCondition(user, exploratoryName, project),
elemMatch(EXPLORATORY_LIBS,
and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))));
if (!opt.isPresent()) {
updateOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
push(EXPLORATORY_LIBS, convertToBson(library)));
return true;
} else {
@@ -205,7 +210,7 @@
values.append(libraryFieldFilter(LIB_ERROR_MESSAGE), null);
}
- updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName),
+ updateOne(USER_INSTANCES, and(exploratoryCondition(user, exploratoryName, project),
elemMatch(EXPLORATORY_LIBS,
and(eq(LIB_GROUP, library.getGroup()), eq(LIB_NAME, library.getName())))),
new Document(SET, values));
@@ -217,22 +222,23 @@
* Add the user's library for exploratory into database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @param computationalName name of computational.
* @param library library.
* @return <b>true</b> if operation was successful, otherwise <b>false</b>.
*/
- public boolean addLibrary(String user, String exploratoryName, String computationalName,
+ public boolean addLibrary(String user, String project, String exploratoryName, String computationalName,
LibInstallDTO library, boolean reinstall) {
Optional<Document> opt = findOne(USER_INSTANCES,
- and(runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+ and(runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())));
if (!opt.isPresent()) {
updateOne(USER_INSTANCES,
- runningExploratoryAndComputationalCondition(user, exploratoryName, computationalName),
+ runningExploratoryAndComputationalCondition(user, project, exploratoryName, computationalName),
push(COMPUTATIONAL_LIBS + "." + computationalName, convertToBson(library)));
return true;
} else {
@@ -243,7 +249,7 @@
}
updateOne(USER_INSTANCES, and(
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_GROUP, library.getGroup()),
eq(COMPUTATIONAL_LIBS + "." + computationalName + "." + LIB_NAME, library.getName())),
@@ -276,7 +282,7 @@
Document values = updateLibraryFields(lib, dto.getUptime());
updateOne(USER_INSTANCES,
- and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
libraryConditionExploratory(lib.getGroup(), lib.getName())),
new Document(SET, values));
} catch (Exception e) {
@@ -292,7 +298,7 @@
Document values = updateComputationalLibraryFields(dto.getComputationalName(), lib, dto.getUptime());
updateOne(USER_INSTANCES,
- and(exploratoryCondition(dto.getUser(), dto.getExploratoryName()),
+ and(exploratoryCondition(dto.getUser(), dto.getExploratoryName(), dto.getProject()),
elemMatch(COMPUTATIONAL_LIBS + "." + dto.getComputationalName(),
libCondition(lib.getGroup(), lib.getName()))),
new Document(SET, values));
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
index 3d3fb36..f6e8bb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/IndexCreator.java
@@ -19,8 +19,8 @@
package com.epam.dlab.backendapi.dao;
-import com.mongodb.BasicDBObject;
import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
import io.dropwizard.lifecycle.Managed;
import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
@@ -28,12 +28,11 @@
/** Creates the indexes for mongo collections. */
public class IndexCreator extends BaseDAO implements Managed {
+ private static final String PROJECT_FIELD = "project";
@Override
public void start() {
mongoService.getCollection(USER_INSTANCES)
- .createIndex(new BasicDBObject(USER, 1)
- .append(EXPLORATORY_NAME, 2),
- new IndexOptions().unique(true));
+ .createIndex(Indexes.ascending(USER, EXPLORATORY_NAME, PROJECT_FIELD), new IndexOptions().unique(true));
// TODO: Make refactoring and append indexes for other mongo collections
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
index 2fbb299..fc292dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/SchedulerJobDAO.java
@@ -31,19 +31,35 @@
import org.bson.Document;
import org.bson.conversions.Bson;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.epam.dlab.backendapi.dao.ComputationalDAO.COMPUTATIONAL_NAME;
-import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
import static com.epam.dlab.backendapi.dao.ComputationalDAO.IMAGE;
-import static com.epam.dlab.backendapi.dao.ExploratoryDAO.*;
+import static com.epam.dlab.backendapi.dao.ComputationalDAO.PROJECT;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.COMPUTATIONAL_RESOURCES;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.EXPLORATORY_NAME;
+import static com.epam.dlab.backendapi.dao.ExploratoryDAO.exploratoryCondition;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_INSTANCES;
import static com.epam.dlab.dto.base.DataEngineType.fromDockerImageName;
-import static com.mongodb.client.model.Filters.*;
-import static com.mongodb.client.model.Projections.*;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
import static java.util.stream.Collectors.toList;
/**
@@ -78,12 +94,13 @@
* Finds and returns the info of user's single scheduler job by exploratory name.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName the name of exploratory.
* @return scheduler job data.
*/
- public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String exploratoryName) {
+ public Optional<SchedulerJobDTO> fetchSingleSchedulerJobByUserAndExploratory(String user, String project, String exploratoryName) {
return findOne(USER_INSTANCES,
- and(exploratoryCondition(user, exploratoryName), schedulerNotNullCondition()),
+ and(exploratoryCondition(user, exploratoryName, project), schedulerNotNullCondition()),
fields(include(SCHEDULER_DATA), excludeId()))
.map(d -> convertFromDocument((Document) d.get(SCHEDULER_DATA), SchedulerJobDTO.class));
}
@@ -92,16 +109,17 @@
* Finds and returns the info of user's single scheduler job for computational resource.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName the name of exploratory.
* @param computationalName the name of computational resource.
* @return scheduler job data.
*/
@SuppressWarnings("unchecked")
- public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String exploratoryName,
+ public Optional<SchedulerJobDTO> fetchSingleSchedulerJobForCluster(String user, String project, String exploratoryName,
String computationalName) {
return findOne(USER_INSTANCES,
- exploratoryCondition(user, exploratoryName),
+ exploratoryCondition(user, exploratoryName, project),
fields(include(COMPUTATIONAL_RESOURCES), excludeId()))
.map(d -> (List<Document>) d.get(COMPUTATIONAL_RESOURCES))
.map(list -> list.stream().filter(d -> d.getString(COMPUTATIONAL_NAME).equals(computationalName))
@@ -140,7 +158,7 @@
eq(CONSIDER_INACTIVITY_FLAG, false)
)
),
- fields(excludeId(), include(USER, EXPLORATORY_NAME, SCHEDULER_DATA))))
+ fields(excludeId(), include(USER, PROJECT, EXPLORATORY_NAME, SCHEDULER_DATA))))
.map(d -> convertFromDocument(d, SchedulerJobData.class))
.collect(toList());
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
index 0767be4..d4bb824 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/dao/UserRoleDaoImpl.java
@@ -21,6 +21,7 @@
import com.epam.dlab.backendapi.resources.dto.UserGroupDto;
import com.epam.dlab.backendapi.resources.dto.UserRoleDto;
import com.epam.dlab.cloud.CloudProvider;
+import com.epam.dlab.exceptions.DlabException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Singleton;
@@ -33,9 +34,12 @@
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static com.epam.dlab.backendapi.dao.MongoCollections.USER_GROUPS;
import static com.mongodb.client.model.Aggregates.group;
@@ -55,6 +59,8 @@
private static final String USERS_FIELD = "users";
private static final String GROUPS_FIELD = "groups";
private static final String DESCRIPTION = "description";
+ private static final String TYPE = "type";
+ private static final String CLOUD = "cloud";
private static final String ROLES = "roles";
private static final String GROUPS = "$groups";
private static final String GROUP = "group";
@@ -90,11 +96,20 @@
@Override
public void updateMissingRoles(CloudProvider cloudProvider) {
- getUserRoleFromFile(cloudProvider).stream()
- .filter(u -> findAll().stream()
+ getUserRoleFromFile(cloudProvider)
+ .stream()
+ .peek(u -> u.setGroups(Collections.emptySet()))
+ .filter(u -> findAll()
+ .stream()
.map(UserRoleDto::getId)
.noneMatch(id -> id.equals(u.getId())))
.forEach(this::insert);
+
+ addGroupToRole(aggregateRolesByGroup()
+ .stream()
+ .map(UserGroupDto::getGroup)
+ .collect(Collectors.toSet()),
+ getDefaultShapes(cloudProvider));
}
@Override
@@ -166,9 +181,26 @@
}
}
+ private Set<String> getDefaultShapes(CloudProvider cloudProvider) {
+ if (cloudProvider == CloudProvider.AWS) {
+ return Stream.of("nbShapes_t2.medium_fetching", "compShapes_c4.xlarge_fetching")
+ .collect(Collectors.toSet());
+ } else if (cloudProvider == CloudProvider.GCP) {
+ return Stream.of("compShapes_n1-standard-2_fetching", "nbShapes_n1-standard-2_fetching")
+ .collect(Collectors.toSet());
+ } else if (cloudProvider == CloudProvider.AZURE) {
+ return Stream.of("nbShapes_Standard_E4s_v3_fetching", "compShapes_Standard_E4s_v3_fetching")
+ .collect(Collectors.toSet());
+ } else {
+ throw new DlabException("Unsupported cloud provider " + cloudProvider);
+ }
+ }
+
private Document roleDocument() {
return new Document().append(ID, "$" + ID)
.append(DESCRIPTION, "$" + DESCRIPTION)
+ .append(TYPE, "$" + TYPE)
+ .append(CLOUD, "$" + CLOUD)
.append(USERS_FIELD, "$" + USERS_FIELD)
.append(EXPLORATORY_SHAPES_FIELD, "$" + EXPLORATORY_SHAPES_FIELD)
.append(PAGES_FIELD, "$" + PAGES_FIELD)
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
index f7f89de..bfee5b3 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/CreateProjectDTO.java
@@ -15,7 +15,7 @@
private final Set<String> groups;
@NotNull final Set<String> endpoints;
@NotNull
- @Pattern(regexp = "^ssh-.*\\n?", message = "Wrong key format. Key should be in openSSH format")
+ @Pattern(regexp = "^ssh-.*\\n?", message = "format is incorrect. Please use the openSSH format")
private final String key;
@NotNull
private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
index 6b6e978..f288a68 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/EndpointDTO.java
@@ -33,11 +33,11 @@
public class EndpointDTO {
private static final String URL_REGEXP_VALIDATION = "^(http(s)?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]";
- @NotEmpty
- @NotBlank
+ @NotBlank(message = "field cannot be empty")
private final String name;
- @URL(regexp = URL_REGEXP_VALIDATION, message = "endpoint field is in improper format!")
+ @URL(regexp = URL_REGEXP_VALIDATION, message = "field is in improper format!")
private final String url;
+ @NotBlank(message = "field cannot be empty")
private final String account;
@JsonProperty("endpoint_tag")
private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
index d3533cc..6bd8a1e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/domain/ProjectDTO.java
@@ -22,7 +22,7 @@
@NotNull
private final Set<String> groups;
@NotNull
- @Pattern(regexp = "^ssh-.*\\n", message = "Wrong key format. Key should be in openSSH format")
+ @Pattern(regexp = "^ssh-.*\\n", message = "format is incorrect. Please use the openSSH format")
private final String key;
@NotNull
private final String tag;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
index f227f3f..ae60e39 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/EnvironmentResource.java
@@ -27,7 +27,12 @@
import org.hibernate.validator.constraints.NotEmpty;
import javax.annotation.security.RolesAllowed;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
@@ -62,59 +67,62 @@
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
- @Path("stop")
- public Response stopEnv(@Auth UserInfo userInfo,
- @NotEmpty String user) {
+ @Path("stop/{projectName}")
+ public Response stopEnv(@Auth UserInfo userInfo, @NotEmpty String user, @PathParam("projectName") String projectName) {
log.info("User {} is stopping {} environment", userInfo.getName(), user);
- environmentService.stopEnvironment(userInfo, user);
+ environmentService.stopEnvironment(userInfo, user, projectName);
return Response.ok().build();
}
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
- @Path("stop/{exploratoryName}")
+ @Path("stop/{projectName}/{exploratoryName}")
public Response stopNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName) {
log.info("Admin {} is stopping notebook {} of user {}", userInfo.getName(), exploratoryName, user);
- environmentService.stopExploratory(userInfo, user, exploratoryName);
+ environmentService.stopExploratory(userInfo, user, projectName, exploratoryName);
return Response.ok().build();
}
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
- @Path("stop/{exploratoryName}/{computationalName}")
+ @Path("stop/{projectName}/{exploratoryName}/{computationalName}")
public Response stopCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.info("Admin {} is stopping computational resource {} affiliated with exploratory {} of user {}",
userInfo.getName(), computationalName, exploratoryName, user);
- environmentService.stopComputational(userInfo, user, exploratoryName, computationalName);
+ environmentService.stopComputational(userInfo, user, projectName, exploratoryName, computationalName);
return Response.ok().build();
}
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
- @Path("terminate/{exploratoryName}")
+ @Path("terminate/{projectName}/{exploratoryName}")
public Response terminateNotebook(@Auth UserInfo userInfo, @NotEmpty String user,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName) {
log.info("Admin {} is terminating notebook {} of user {}", userInfo.getName(), exploratoryName, user);
- environmentService.terminateExploratory(userInfo, user, exploratoryName);
+ environmentService.terminateExploratory(userInfo, user, projectName, exploratoryName);
return Response.ok().build();
}
@POST
@Consumes(MediaType.TEXT_PLAIN)
@Produces(MediaType.APPLICATION_JSON)
- @Path("terminate/{exploratoryName}/{computationalName}")
+ @Path("terminate/{projectName}/{exploratoryName}/{computationalName}")
public Response terminateCluster(@Auth UserInfo userInfo, @NotEmpty String user,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.info("Admin {} is terminating computational resource {} affiliated with exploratory {} of user {}",
userInfo.getName(), computationalName, exploratoryName, user);
- environmentService.terminateComputational(userInfo, user, exploratoryName, computationalName);
+ environmentService.terminateComputational(userInfo, user, projectName, exploratoryName, computationalName);
return Response.ok().build();
}
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
index cb2c7d2..7b29af1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ExploratoryResource.java
@@ -37,7 +37,14 @@
import javax.annotation.security.RolesAllowed;
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.List;
@@ -109,11 +116,12 @@
* @return Invocation response as JSON string.
*/
@DELETE
- @Path("/{name}/stop")
+ @Path("/{project}/{name}/stop")
public String stop(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("name") String name) {
log.debug("Stopping exploratory environment {} for user {}", name, userInfo.getName());
- return exploratoryService.stop(userInfo, name);
+ return exploratoryService.stop(userInfo, project, name);
}
/**
@@ -124,29 +132,32 @@
* @return Invocation response as JSON string.
*/
@DELETE
- @Path("/{name}/terminate")
+ @Path("/{project}/{name}/terminate")
public String terminate(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("name") String name) {
log.debug("Terminating exploratory environment {} for user {}", name, userInfo.getName());
- return exploratoryService.terminate(userInfo, name);
+ return exploratoryService.terminate(userInfo, project, name);
}
@PUT
- @Path("/{name}/reconfigure")
+ @Path("/{project}/{name}/reconfigure")
public Response reconfigureSpark(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("name") String name,
List<ClusterConfig> config) {
log.debug("Updating exploratory {} spark cluster for user {}", name, userInfo.getName());
- exploratoryService.updateClusterConfig(userInfo, name, config);
+ exploratoryService.updateClusterConfig(userInfo, project, name, config);
return Response.ok().build();
}
@GET
- @Path("/{name}/cluster/config")
+ @Path("/{project}/{name}/cluster/config")
public Response getClusterConfig(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("name") String name) {
log.debug("Getting exploratory {} spark cluster configuration for user {}", name, userInfo.getName());
- return Response.ok(exploratoryService.getClusterConfig(userInfo, name)).build();
+ return Response.ok(exploratoryService.getClusterConfig(userInfo, project, name)).build();
}
private Exploratory getExploratory(ExploratoryCreateFormDTO formDTO) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
index a39a468..f913e2b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ImageExploratoryResource.java
@@ -30,8 +30,18 @@
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
import java.net.URI;
import java.util.List;
@@ -58,8 +68,8 @@
@Valid @NotNull ExploratoryImageCreateFormDTO formDTO,
@Context UriInfo uriInfo) {
log.debug("Creating an image {} for user {}", formDTO, ui.getName());
- String uuid = imageExploratoryService.createImage(ui, formDTO.getNotebookName(), formDTO.getName(), formDTO
- .getDescription());
+ String uuid = imageExploratoryService.createImage(ui, formDTO.getProjectName(), formDTO.getNotebookName(),
+ formDTO.getName(), formDTO.getDescription());
requestId.put(ui.getName(), uuid);
final URI imageUri = UriBuilder.fromUri(uriInfo.getRequestUri())
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
index 64ede19..841ed73 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/LibExploratoryResource.java
@@ -41,7 +41,12 @@
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.List;
@@ -81,6 +86,7 @@
@GET
@Path("/lib_groups")
public Iterable<String> getLibGroupList(@Auth UserInfo userInfo,
+ @QueryParam("project_name") @NotBlank String projectName,
@QueryParam("exploratory_name") @NotBlank String exploratoryName,
@QueryParam("computational_name") String computationalName) {
@@ -88,11 +94,11 @@
exploratoryName, computationalName);
try {
if (StringUtils.isEmpty(computationalName)) {
- UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+ UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
exploratoryName);
return ExploratoryLibCache.getCache().getLibGroupList(userInfo, userInstance);
} else {
- UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+ UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), projectName,
exploratoryName, computationalName);
userInstance.setResources(userInstance.getResources().stream()
@@ -120,13 +126,14 @@
@GET
@Path("/lib_list")
public List<Document> getLibList(@Auth UserInfo userInfo,
+ @QueryParam("project_name") @NotBlank String projectName,
@QueryParam("exploratory_name") @NotBlank String exploratoryName,
@QueryParam("computational_name") String computationalName) {
log.debug("Loading list of libraries for user {} and exploratory {} and computational {}", userInfo.getName(),
exploratoryName, computationalName);
try {
- return libraryService.getLibs(userInfo.getName(), exploratoryName, computationalName);
+ return libraryService.getLibs(userInfo.getName(), projectName, exploratoryName, computationalName);
} catch (Exception t) {
log.error("Cannot load installed libraries for user {} and exploratory {} an", userInfo.getName(),
@@ -147,14 +154,14 @@
*/
@GET
@Path("/lib_list/formatted")
-
public List<LibInfoRecord> getLibListFormatted(@Auth UserInfo userInfo,
+ @QueryParam("project_name") @NotBlank String projectName,
@QueryParam("exploratory_name") @NotBlank String exploratoryName) {
log.debug("Loading formatted list of libraries for user {} and exploratory {}", userInfo.getName(),
exploratoryName);
try {
- return libraryService.getLibInfo(userInfo.getName(), exploratoryName);
+ return libraryService.getLibInfo(userInfo.getName(), projectName, exploratoryName);
} catch (Exception t) {
log.error("Cannot load list of libraries for user {} and exploratory {}", userInfo.getName(),
exploratoryName, t);
@@ -175,15 +182,16 @@
public Response libInstall(@Auth UserInfo userInfo,
@Valid @NotNull LibInstallFormDTO formDTO) {
log.debug("Installing libs to environment {} for user {}", formDTO, userInfo.getName());
+ String project = formDTO.getProject();
final String exploratoryName = formDTO.getNotebookName();
final List<LibInstallDTO> libs = formDTO.getLibs();
final String computationalName = formDTO.getComputationalName();
String uuid = StringUtils.isEmpty(computationalName) ?
- libraryService.installExploratoryLibs(userInfo, exploratoryName, libs) :
- libraryService.installComputationalLibs(userInfo, exploratoryName, computationalName, libs);
+ libraryService.installExploratoryLibs(userInfo, project, exploratoryName, libs) :
+ libraryService.installComputationalLibs(userInfo, project, exploratoryName, computationalName, libs);
return Response.ok(uuid)
.build();
- }
+ }
/**
* Returns the list of available libraries for exploratory basing on search conditions provided in @formDTO.
@@ -203,7 +211,7 @@
if (StringUtils.isNotEmpty(formDTO.getComputationalName())) {
- userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+ userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
formDTO.getNotebookName(), formDTO.getComputationalName());
userInstance.setResources(userInstance.getResources().stream()
@@ -211,7 +219,8 @@
.collect(Collectors.toList()));
} else {
- userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getNotebookName());
+ userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO.getProjectName(),
+ formDTO.getNotebookName());
}
return ExploratoryLibCache.getCache().getLibList(userInfo, userInstance, formDTO.getGroup(), formDTO
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
index a93224c..93e42bb 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/ProjectResource.java
@@ -104,33 +104,12 @@
@RolesAllowed("/api/project")
public Response stopProject(@Parameter(hidden = true) @Auth UserInfo userInfo,
@NotNull @Valid ProjectActionFormDTO stopProjectDTO) {
- projectService.stop(userInfo, stopProjectDTO.getEndpoints(), stopProjectDTO.getProjectName());
+ projectService.stopWithResources(userInfo, stopProjectDTO.getEndpoints(), stopProjectDTO.getProjectName());
return Response
.accepted()
.build();
}
- @Operation(summary = "Stop project on Manage environment popup", tags = "project")
- @ApiResponses({
- @ApiResponse(responseCode = "202", description = "Project is stopping"),
- @ApiResponse(responseCode = "400", description = "Validation error", content = @Content(mediaType =
- MediaType.APPLICATION_JSON,
- schema = @Schema(implementation = ErrorDTO.class)))
- })
- @Path("managing/stop/{name}")
- @POST
- @Consumes(MediaType.APPLICATION_JSON)
- @RolesAllowed("/api/project")
- public Response stopProjectWithResources(@Parameter(hidden = true) @Auth UserInfo userInfo,
- @Parameter(description = "Project name")
- @PathParam("name") String name) {
- projectService.stopWithResources(userInfo, name);
- return Response
- .accepted()
- .build();
- }
-
-
@Operation(summary = "Get project info", tags = "project")
@ApiResponses({
@ApiResponse(responseCode = "200", description = "Return information about project",
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
index c4f9ee4..dd8f82f 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/SchedulerJobResource.java
@@ -28,7 +28,14 @@
import io.dropwizard.auth.Auth;
import lombok.extern.slf4j.Slf4j;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
@@ -56,12 +63,13 @@
* @return response
*/
@POST
- @Path("/{exploratoryName}")
+ @Path("/{projectName}/{exploratoryName}")
@Consumes(MediaType.APPLICATION_JSON)
public Response updateExploratoryScheduler(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@SchedulerJobDTOValid SchedulerJobDTO dto) {
- schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), exploratoryName, dto);
+ schedulerJobService.updateExploratorySchedulerData(userInfo.getName(), projectName, exploratoryName, dto);
return Response.ok().build();
}
@@ -92,16 +100,17 @@
* @return response
*/
@POST
- @Path("/{exploratoryName}/{computationalName}")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}")
@Consumes(MediaType.APPLICATION_JSON)
public Response updateComputationalScheduler(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName,
@SchedulerJobDTOValid SchedulerJobDTO dto) {
- schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), exploratoryName,
+ schedulerJobService.updateComputationalSchedulerData(userInfo.getName(), projectName, exploratoryName,
computationalName, dto);
return Response.ok().build();
- }
+ }
/**
* Updates computational resource <code>computationalName<code/> affiliated with exploratory
@@ -132,13 +141,14 @@
* @return scheduler job data
*/
@GET
- @Path("/{exploratoryName}")
+ @Path("/{projectName}/{exploratoryName}")
@Produces(MediaType.APPLICATION_JSON)
public Response fetchSchedulerJobForUserAndExploratory(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName) {
log.debug("Loading scheduler job for user {} and exploratory {}...", userInfo.getName(), exploratoryName);
final SchedulerJobDTO schedulerJob =
- schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), exploratoryName);
+ schedulerJobService.fetchSchedulerJobForUserAndExploratory(userInfo.getName(), projectName, exploratoryName);
return Response.ok(schedulerJob).build();
}
@@ -152,15 +162,16 @@
* @return scheduler job data
*/
@GET
- @Path("/{exploratoryName}/{computationalName}")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}")
@Produces(MediaType.APPLICATION_JSON)
public Response fetchSchedulerJobForComputationalResource(@Auth UserInfo userInfo,
@PathParam("exploratoryName") String exploratoryName,
+ @PathParam("projectName") String projectName,
@PathParam("computationalName") String computationalName) {
log.debug("Loading scheduler job for user {}, exploratory {} and computational resource {}...",
userInfo.getName(), exploratoryName, computationalName);
final SchedulerJobDTO schedulerJob = schedulerJobService
- .fetchSchedulerJobForComputationalResource(userInfo.getName(), exploratoryName, computationalName);
+ .fetchSchedulerJobForComputationalResource(userInfo.getName(), projectName, exploratoryName, computationalName);
return Response.ok(schedulerJob).build();
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
index d2ed15d..87f99bd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/aws/ComputationalResourceAws.java
@@ -34,13 +34,18 @@
import com.epam.dlab.rest.contracts.ComputationalAPI;
import com.google.inject.Inject;
import io.dropwizard.auth.Auth;
-import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import lombok.extern.slf4j.Slf4j;
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.List;
@@ -57,12 +62,17 @@
@Produces(MediaType.APPLICATION_JSON)
@Slf4j
public class ComputationalResourceAws implements ComputationalAPI {
-
@Inject
private SelfServiceApplicationConfiguration configuration;
@Inject
private ComputationalService computationalService;
+ @GET
+ @Path("/{project}/{endpoint}/templates")
+ public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+ @PathParam("endpoint") String endpoint) {
+ return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+ }
/**
* Asynchronously creates EMR cluster
@@ -133,13 +143,14 @@
* @return 200 OK if operation is successfully triggered
*/
@DELETE
- @Path("/{exploratoryName}/{computationalName}/terminate")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
public Response terminate(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+ computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
return Response.ok().build();
}
@@ -155,14 +166,15 @@
@DELETE
@Path("/{project}/{exploratoryName}/{computationalName}/stop")
public Response stop(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+ computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
return Response.ok().build();
- }
+ }
/**
* Sends request to provisioning service for starting the computational resource for user.
@@ -186,22 +198,24 @@
}
@PUT
- @Path("dataengine/{exploratoryName}/{computationalName}/config")
+ @Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName,
@Valid @NotNull List<ClusterConfig> config) {
- computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+ computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
return Response.ok().build();
}
@GET
- @Path("{exploratoryName}/{computationalName}/config")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/config")
public Response getClusterConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
- return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+ return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
}
private void validate(SparkStandaloneClusterCreateForm form) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
index 1ba09d8..29f9794 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/azure/ComputationalResourceAzure.java
@@ -21,26 +21,27 @@
import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.auth.rest.UserSessionDurationAuthorizer;
-import com.epam.dlab.backendapi.conf.SelfServiceApplicationConfiguration;
-import com.epam.dlab.backendapi.dao.ComputationalDAO;
-import com.epam.dlab.backendapi.dao.ExploratoryDAO;
import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
import com.epam.dlab.backendapi.roles.RoleType;
import com.epam.dlab.backendapi.roles.UserRoles;
import com.epam.dlab.backendapi.service.ComputationalService;
-import com.epam.dlab.constants.ServiceConsts;
import com.epam.dlab.dto.aws.computational.ClusterConfig;
import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.rest.client.RESTService;
import com.google.inject.Inject;
-import com.google.inject.name.Named;
import io.dropwizard.auth.Auth;
+import io.swagger.v3.oas.annotations.Parameter;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.security.RolesAllowed;
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.List;
@@ -53,22 +54,19 @@
@Produces(MediaType.APPLICATION_JSON)
@Slf4j
public class ComputationalResourceAzure {
+ private final ComputationalService computationalService;
@Inject
- private ExploratoryDAO exploratoryDAO;
+ public ComputationalResourceAzure(ComputationalService computationalService) {
+ this.computationalService = computationalService;
+ }
- @Inject
- private ComputationalDAO computationalDAO;
-
- @Inject
- @Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
- private RESTService provisioningService;
-
- @Inject
- private SelfServiceApplicationConfiguration configuration;
-
- @Inject
- private ComputationalService computationalService;
+ @GET
+ @Path("/{project}/{endpoint}/templates")
+ public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+ @PathParam("endpoint") String endpoint) {
+ return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+ }
/**
* Asynchronously creates computational Spark cluster.
@@ -105,14 +103,15 @@
* @return 200 OK if operation is successfully triggered
*/
@DELETE
- @Path("/{exploratoryName}/{computationalName}/terminate")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
public Response terminate(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+ computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
return Response.ok().build();
}
@@ -127,12 +126,13 @@
*/
@DELETE
@Path("/{project}/{exploratoryName}/{computationalName}/stop")
- public Response stop( @Auth UserInfo userInfo,
+ public Response stop(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+ computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
return Response.ok().build();
}
@@ -159,21 +159,23 @@
}
@PUT
- @Path("dataengine/{exploratoryName}/{computationalName}/config")
+ @Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName,
@Valid @NotNull List<ClusterConfig> config) {
- computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+ computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
return Response.ok().build();
}
@GET
- @Path("{exploratoryName}/{computationalName}/config")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/config")
public Response getClusterConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
- return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+ return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
}
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
index 111bcfa..abf4c6d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/CheckInactivityCallback.java
@@ -58,9 +58,8 @@
@Path("computational")
public Response updateComputationalLastActivity(CheckInactivityStatusDTO dto) {
requestId.checkAndRemove(dto.getRequestId());
- inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null),
- dto.getExploratoryName(),
- dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
+ inactivityService.updateLastActivityForComputational(new UserInfo(dto.getUser(), null), null,
+ dto.getExploratoryName(), dto.getComputationalName(), toLocalDateTime(dto.getLastActivityUnixTime()));
return Response.ok().build();
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
index 04ca8fb..2b286b5 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ComputationalCallback.java
@@ -19,7 +19,6 @@
package com.epam.dlab.backendapi.resources.callback;
-import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.dao.ComputationalDAO;
import com.epam.dlab.backendapi.domain.RequestId;
import com.epam.dlab.backendapi.service.ComputationalService;
@@ -29,7 +28,6 @@
import com.epam.dlab.dto.computational.ComputationalStatusDTO;
import com.epam.dlab.dto.computational.UserComputationalResource;
import com.epam.dlab.exceptions.DlabException;
-import com.epam.dlab.model.ResourceData;
import com.epam.dlab.rest.contracts.ApiCallbacks;
import com.google.inject.Inject;
import lombok.extern.slf4j.Slf4j;
@@ -42,8 +40,6 @@
import javax.ws.rs.core.Response;
import java.util.Date;
-import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-
@Path("/infrastructure_provision/computational_resources")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@@ -75,11 +71,12 @@
String uuid = dto.getRequestId();
requestId.checkAndRemove(uuid);
- UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(),
- dto.getExploratoryName(), dto.getComputationalName()).orElseThrow(() ->
- new DlabException("Computational resource " + dto.getComputationalName() +
- " of exploratory environment " + dto.getExploratoryName() + " for user " + dto.getUser() +
- " doesn't exist"));
+ UserComputationalResource compResource = computationalService.getComputationalResource(dto.getUser(), dto.getProject(),
+ dto.getExploratoryName(), dto.getComputationalName())
+ .orElseThrow(() ->
+ new DlabException(String.format("Computational resource %s of exploratory environment %s of " +
+ "project %s for user %s doesn't exist", dto.getComputationalName(),
+ dto.getExploratoryName(), dto.getProject(), dto.getUser())));
log.debug("Current status for computational resource {} of exploratory environment {} for user {} is {}",
dto.getComputationalName(), dto.getExploratoryName(), dto.getUser(),
compResource.getStatus());
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
index 618fb04..d4c059e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/EnvironmentStatusCallback.java
@@ -20,14 +20,12 @@
package com.epam.dlab.backendapi.resources.callback;
import com.epam.dlab.backendapi.dao.EnvDAO;
-import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.domain.RequestId;
import com.epam.dlab.dto.UserInstanceStatus;
import com.epam.dlab.dto.status.EnvStatusDTO;
import com.epam.dlab.exceptions.DlabException;
import com.epam.dlab.rest.contracts.ApiCallbacks;
import com.google.inject.Inject;
-import io.dropwizard.auth.Auth;
import lombok.extern.slf4j.Slf4j;
import javax.ws.rs.Consumes;
@@ -63,7 +61,7 @@
if (UserInstanceStatus.FAILED == UserInstanceStatus.of(dto.getStatus())) {
log.warn("Request for the status of resources for user {} fails: {}", dto.getUser(), dto.getErrorMessage());
} else {
- envDAO.updateEnvStatus(dto.getUser(), dto.getResourceList());
+ envDAO.updateEnvStatus(dto.getUser(), null, dto.getResourceList());
}
} catch (DlabException e) {
log.warn("Could not update status of resources for user {}: {}", dto.getUser(), e.getLocalizedMessage(), e);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
index 8e11c0b..c275a18 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/callback/ExploratoryCallback.java
@@ -41,7 +41,11 @@
import javax.ws.rs.core.Response;
import java.util.Date;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATED;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
@Path("/infrastructure_provision/exploratory_environment")
@@ -78,7 +82,7 @@
dto.getExploratoryName(), dto.getUser(), dto.getStatus());
requestId.checkAndRemove(dto.getRequestId());
- UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getExploratoryName())
+ UserInstanceDTO instance = exploratoryService.getUserInstance(dto.getUser(), dto.getProject(), dto.getExploratoryName())
.orElseThrow(() -> new DlabException(String.format(USER_INSTANCE_NOT_EXIST_MSG,
dto.getExploratoryName(), dto.getUser())));
@@ -89,15 +93,15 @@
try {
exploratoryDAO.updateExploratoryFields(dto.withLastActivity(new Date()));
if (currentStatus == TERMINATING) {
- updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+ updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
UserInstanceStatus.of(dto.getStatus()));
} else if (currentStatus == STOPPING) {
- updateComputationalStatuses(dto.getUser(), dto.getExploratoryName(),
+ updateComputationalStatuses(dto.getUser(), dto.getProject(), dto.getExploratoryName(),
UserInstanceStatus.of(dto.getStatus()), TERMINATED, FAILED, TERMINATED, STOPPED);
}
} catch (DlabException e) {
- log.error("Could not update status for exploratory environment {} for user {} to {}",
- dto.getExploratoryName(), dto.getUser(), dto.getStatus(), e);
+ log.error("Could not update status for exploratory environment {} in project {} for user {} to {}",
+ dto.getExploratoryName(), dto.getProject(), dto.getUser(), dto.getStatus(), e);
throw new DlabException("Could not update status for exploratory environment " + dto.getExploratoryName() +
" for user " + dto.getUser() + " to " + dto.getStatus() + ": " + e.getLocalizedMessage(), e);
}
@@ -109,23 +113,25 @@
* Updates the computational status of exploratory environment.
*
* @param user user name
+ * @param project project name
* @param exploratoryName name of exploratory environment.
* @param status status for exploratory environment.
*/
- private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus status) {
+ private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus status) {
log.debug("updating status for all computational resources of {} for user {}: {}", exploratoryName, user,
status);
computationalDAO.updateComputationalStatusesForExploratory(new ExploratoryStatusDTO()
.withUser(user)
.withExploratoryName(exploratoryName)
+ .withProject(project)
.withStatus(status));
}
- private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+ private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
- computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
- dataEngineServiceStatus, excludedStatuses);
+ computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+ dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
}
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
new file mode 100644
index 0000000..9871918
--- /dev/null
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ComputationalTemplatesDTO.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.epam.dlab.backendapi.resources.dto;
+
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+
+import java.util.List;
+
+@Data
+public class ComputationalTemplatesDTO {
+ private final List<FullComputationalTemplate> templates;
+ @JsonProperty("user_computations")
+ private final List<String> userComputations;
+ @JsonProperty("project_computations")
+ private final List<String> projectComputations;
+}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
index 9c3eb30..14193f2 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/ExploratoryImageCreateFormDTO.java
@@ -27,11 +27,13 @@
@Data
@ToString
public class ExploratoryImageCreateFormDTO {
-
- @NotBlank
- @JsonProperty("exploratory_name")
- private String notebookName;
- @NotBlank
- private final String name;
- private final String description;
+ @NotBlank
+ private final String name;
+ @NotBlank
+ @JsonProperty("exploratory_name")
+ private String notebookName;
+ @NotBlank
+ @JsonProperty("project_name")
+ private String projectName;
+ private final String description;
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
index 1a3b8a8..c2b8d1a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/LibInstallFormDTO.java
@@ -39,6 +39,9 @@
@JsonProperty("computational_name")
private String computationalName;
+ @JsonProperty("project_name")
+ private String project;
+
@NotEmpty
@JsonProperty
private List<LibInstallDTO> libs;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
index f817c4e..ff6edb6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/SearchLibsFormDTO.java
@@ -30,6 +30,10 @@
private String notebookName;
@NotBlank
+ @JsonProperty("project_name")
+ private String projectName;
+
+ @NotBlank
@JsonProperty
private String group;
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
index 21ce26d..5c90602 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/dto/UserRoleDto.java
@@ -18,6 +18,7 @@
*/
package com.epam.dlab.backendapi.resources.dto;
+import com.epam.dlab.cloud.CloudProvider;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Getter;
@@ -31,10 +32,11 @@
@ToString
@JsonIgnoreProperties(ignoreUnknown = true)
public class UserRoleDto {
-
@JsonProperty("_id")
private String id;
private String description;
+ private Type type;
+ private CloudProvider cloud;
private Set<String> pages;
private Set<String> computationals;
private Set<String> exploratories;
@@ -42,4 +44,12 @@
private Set<String> exploratoryShapes;
private Set<String> groups;
+ private enum Type {
+ NOTEBOOK,
+ COMPUTATIONAL,
+ NOTEBOOK_SHAPE,
+ COMPUTATIONAL_SHAPE,
+ BILLING,
+ ADMINISTRATION
+ }
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
index 43fca4a..087330a 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/resources/gcp/ComputationalResourceGcp.java
@@ -39,7 +39,13 @@
import javax.validation.Valid;
import javax.validation.constraints.NotNull;
-import javax.ws.rs.*;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.List;
@@ -55,13 +61,23 @@
@Produces(MediaType.APPLICATION_JSON)
@Slf4j
public class ComputationalResourceGcp implements ComputationalAPI {
+ private final SelfServiceApplicationConfiguration configuration;
+ private final ComputationalService computationalService;
@Inject
- private SelfServiceApplicationConfiguration configuration;
- @Inject
- private ComputationalService computationalService;
+ public ComputationalResourceGcp(SelfServiceApplicationConfiguration configuration, ComputationalService computationalService) {
+ this.configuration = configuration;
+ this.computationalService = computationalService;
+ }
+ @GET
+ @Path("/{project}/{endpoint}/templates")
+ public Response getTemplates(@Auth @Parameter(hidden = true) UserInfo userInfo, @PathParam("project") String project,
+ @PathParam("endpoint") String endpoint) {
+ return Response.ok(computationalService.getComputationalNamesAndTemplates(userInfo, project, endpoint)).build();
+ }
+
/**
* Asynchronously creates Dataproc cluster
*
@@ -134,13 +150,14 @@
* @return 200 OK if operation is successfully triggered
*/
@DELETE
- @Path("/{exploratoryName}/{computationalName}/terminate")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/terminate")
public Response terminate(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Terminating computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.terminateComputational(userInfo, exploratoryName, computationalName);
+ computationalService.terminateComputational(userInfo, projectName, exploratoryName, computationalName);
return Response.ok().build();
}
@@ -156,14 +173,15 @@
@DELETE
@Path("/{project}/{exploratoryName}/{computationalName}/stop")
public Response stop(@Auth UserInfo userInfo,
+ @PathParam("project") String project,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
log.debug("Stopping computational resource {} for user {}", computationalName, userInfo.getName());
- computationalService.stopSparkCluster(userInfo, exploratoryName, computationalName);
+ computationalService.stopSparkCluster(userInfo, project, exploratoryName, computationalName);
return Response.ok().build();
- }
+ }
/**
* Sends request to provisioning service for starting the computational resource for user.
@@ -187,22 +205,24 @@
}
@PUT
- @Path("dataengine/{exploratoryName}/{computationalName}/config")
+ @Path("dataengine/{projectName}/{exploratoryName}/{computationalName}/config")
public Response updateDataEngineConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName,
@Valid @NotNull List<ClusterConfig> config) {
- computationalService.updateSparkClusterConfig(userInfo, exploratoryName, computationalName, config);
+ computationalService.updateSparkClusterConfig(userInfo, projectName, exploratoryName, computationalName, config);
return Response.ok().build();
}
@GET
- @Path("{exploratoryName}/{computationalName}/config")
+ @Path("/{projectName}/{exploratoryName}/{computationalName}/config")
public Response getClusterConfig(@Auth UserInfo userInfo,
+ @PathParam("projectName") String projectName,
@PathParam("exploratoryName") String exploratoryName,
@PathParam("computationalName") String computationalName) {
- return Response.ok(computationalService.getClusterConfig(userInfo, exploratoryName, computationalName)).build();
+ return Response.ok(computationalService.getClusterConfig(userInfo, projectName, exploratoryName, computationalName)).build();
}
private void validate(@Auth UserInfo userInfo, GcpComputationalCreateForm formDTO) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
index 217e18e..4a6f392 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ComputationalService.java
@@ -21,16 +21,17 @@
import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
-import com.epam.dlab.dto.UserInstanceStatus;
import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
import com.epam.dlab.dto.computational.UserComputationalResource;
import java.util.List;
import java.util.Optional;
public interface ComputationalService {
+ ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint);
+
/**
* Asynchronously triggers creation of Spark cluster
*
@@ -46,29 +47,25 @@
* Asynchronously triggers termination of computational resources
*
* @param userInfo user info of authenticated user
+ * @param project project name
* @param exploratoryName name of exploratory where to terminate computational resources with
* <code>computationalName</code>
* @param computationalName computational name
*/
- void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName);
+ void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName);
boolean createDataEngineService(UserInfo userInfo, ComputationalCreateFormDTO formDTO, UserComputationalResource
computationalResource, String project);
- void stopSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName);
+ void stopSparkCluster(UserInfo userInfo, String project, String exploratoryName, String computationalName);
void startSparkCluster(UserInfo userInfo, String exploratoryName, String computationalName, String project);
- void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+ void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
List<ClusterConfig> config);
- void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
- List<DataEngineType> computationalTypes,
- boolean reuploadKeyRequired,
- UserInstanceStatus... computationalStatuses);
-
- Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+ Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
String computationalName);
- List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName);
+ List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
index 5ef7cae..f765aa4 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/EnvironmentService.java
@@ -36,17 +36,17 @@
void stopAll();
- void stopEnvironment(UserInfo userInfo, String user);
+ void stopEnvironment(UserInfo userInfo, String user, String project);
void stopEnvironmentWithServiceAccount(String user);
void stopProjectEnvironment(String project);
- void stopExploratory(UserInfo userInfo, String user, String exploratoryName);
+ void stopExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
- void stopComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName);
+ void stopComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
- void terminateExploratory(UserInfo userInfo, String user, String exploratoryName);
+ void terminateExploratory(UserInfo userInfo, String user, String project, String exploratoryName);
- void terminateComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName);
+ void terminateComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
index 4348819..2b93a8e 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ExploratoryService.java
@@ -32,29 +32,21 @@
public interface ExploratoryService {
- String start(UserInfo userInfo, String exploratoryName, String project);
+ String start(UserInfo userInfo, String exploratoryName, String project);
- String stop(UserInfo userInfo, String exploratoryName);
+ String stop(UserInfo userInfo, String project, String exploratoryName);
- String terminate(UserInfo userInfo, String exploratoryName);
+ String terminate(UserInfo userInfo, String project, String exploratoryName);
- String create(UserInfo userInfo, Exploratory exploratory, String project);
+ String create(UserInfo userInfo, Exploratory exploratory, String project);
- void updateExploratoryStatuses(String user, UserInstanceStatus status);
+ void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
- void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status);
+ void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config);
- void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
- UserInstanceStatus... exploratoryStatuses);
+ Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName);
- List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
- UserInstanceStatus computationalStatus);
+ List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName);
- void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config);
-
- Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName);
-
- List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName);
-
- ExploratoryCreatePopUp getUserInstances(UserInfo user);
+ ExploratoryCreatePopUp getUserInstances(UserInfo user);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
index 5091c97..604bdcf 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ImageExploratoryService.java
@@ -27,13 +27,13 @@
public interface ImageExploratoryService {
- String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription);
+ String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription);
- void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
+ void finishImageCreate(Image image, String exploratoryName, String newNotebookIp);
- List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
+ List<ImageInfoRecord> getNotFailedImages(String user, String dockerImage, String project, String endpoint);
- ImageInfoRecord getImage(String user, String name, String project, String endpoint);
+ ImageInfoRecord getImage(String user, String name, String project, String endpoint);
- List<ImageInfoRecord> getImagesForProject(String project);
+ List<ImageInfoRecord> getImagesForProject(String project);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
index 7b5cd44..038a7b6 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/InactivityService.java
@@ -24,10 +24,10 @@
public interface InactivityService {
- void updateRunningResourcesLastActivity();
+ void updateRunningResourcesLastActivity();
- void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
+ void updateLastActivityForExploratory(UserInfo userInfo, String exploratoryName, LocalDateTime lastActivity);
- void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
- String computationalName, LocalDateTime lastActivity);
+ void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
+ String computationalName, LocalDateTime lastActivity);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
index 5b98293..bdd22f1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/LibraryService.java
@@ -27,12 +27,12 @@
import java.util.List;
public interface LibraryService {
- List<Document> getLibs(String user, String exploratoryName, String computationalName);
+ List<Document> getLibs(String user, String project, String exploratoryName, String computationalName);
- List<LibInfoRecord> getLibInfo(String user, String exploratoryName);
+ List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName);
- String installComputationalLibs(UserInfo userInfo, String exploratoryName, String computationalName,
- List<LibInstallDTO> libs);
+ String installComputationalLibs(UserInfo userInfo, String project, String exploratoryName, String computationalName,
+ List<LibInstallDTO> libs);
- String installExploratoryLibs(UserInfo userInfo, String exploratoryName, List<LibInstallDTO> libs);
+ String installExploratoryLibs(UserInfo userInfo, String project, String exploratoryName, List<LibInstallDTO> libs);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
index 738fbdb..1fd394b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/ProjectService.java
@@ -36,7 +36,7 @@
void stop(UserInfo userInfo, List<String> endpoints, String name);
- void stopWithResources(UserInfo userInfo, String projectName);
+ void stopWithResources(UserInfo userInfo, List<String> endpoints, String projectName);
void update(UserInfo userInfo, UpdateProjectDTO projectDTO);
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
index 1059db0..7702601 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/SchedulerJobService.java
@@ -25,57 +25,61 @@
import java.util.List;
public interface SchedulerJobService {
- /**
- * Pulls out scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
- *
- * @param user user's name
- * @param exploratoryName name of exploratory resource
- * @return dto object
- */
- SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName);
+ /**
+ * Pulls out scheduler job data for user <code>user</code> and his exploratory <code>exploratoryName</code>
+ *
+ * @param user user's name
+ * @param project project name
+ * @param exploratoryName name of exploratory resource
+ * @return dto object
+ */
+ SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName);
- /**
- * Pulls out scheduler job data for computational resource <code>computationalName<code/> affiliated with
- * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
- *
- * @param user user's name
- * @param exploratoryName name of exploratory resource
- * @param computationalName name of computational resource
- * @return dto object
- */
- SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
- String computationalName);
+ /**
+ * Pulls out scheduler job data for computational resource <code>computationalName</code> affiliated with
+ * user <code>user</code> and his exploratory <code>exploratoryName</code>
+ *
+ * @param user user's name
+ * @param project project name
+ * @param exploratoryName name of exploratory resource
+ * @param computationalName name of computational resource
+ * @return dto object
+ */
+ SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
+ String computationalName);
- /**
- * Updates scheduler job data for user <code>user<code/> and his exploratory <code>exploratoryName<code/>
- *
- * @param user user's name
- * @param exploratoryName name of exploratory resource
- * @param dto scheduler job data
- */
- void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto);
+ /**
+ * Updates scheduler job data for user <code>user</code> and his exploratory <code>exploratoryName</code>
+ *
+ * @param user user's name
+ * @param project project name
+ * @param exploratoryName name of exploratory resource
+ * @param dto scheduler job data
+ */
+ void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto);
- /**
- * Updates scheduler job data for computational resource <code>computationalName<code/> affiliated with
- * user <code>user<code/> and his exploratory <code>exploratoryName<code/>
- *
- * @param user user's name
- * @param exploratoryName name of exploratory resource
- * @param computationalName name of computational resource
- * @param dto scheduler job data
- */
- void updateComputationalSchedulerData(String user, String exploratoryName,
- String computationalName, SchedulerJobDTO dto);
+ /**
+ * Updates scheduler job data for computational resource <code>computationalName</code> affiliated with
+ * user <code>user</code> and his exploratory <code>exploratoryName</code>
+ *
+ * @param user user's name
+ * @param project project name
+ * @param exploratoryName name of exploratory resource
+ * @param computationalName name of computational resource
+ * @param dto scheduler job data
+ */
+ void updateComputationalSchedulerData(String user, String project, String exploratoryName,
+ String computationalName, SchedulerJobDTO dto);
- void stopComputationalByScheduler();
+ void stopComputationalByScheduler();
- void stopExploratoryByScheduler();
+ void stopExploratoryByScheduler();
- void startExploratoryByScheduler();
+ void startExploratoryByScheduler();
- void startComputationalByScheduler();
+ void startComputationalByScheduler();
- void terminateExploratoryByScheduler();
+ void terminateExploratoryByScheduler();
void terminateComputationalByScheduler();
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
index 52decf8..722ee4d 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImpl.java
@@ -29,9 +29,11 @@
import com.epam.dlab.backendapi.domain.ProjectDTO;
import com.epam.dlab.backendapi.domain.RequestId;
import com.epam.dlab.backendapi.resources.dto.ComputationalCreateFormDTO;
+import com.epam.dlab.backendapi.resources.dto.ComputationalTemplatesDTO;
import com.epam.dlab.backendapi.resources.dto.SparkStandaloneClusterCreateForm;
import com.epam.dlab.backendapi.service.ComputationalService;
import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.InfrastructureTemplateService;
import com.epam.dlab.backendapi.service.ProjectService;
import com.epam.dlab.backendapi.service.TagService;
import com.epam.dlab.backendapi.util.RequestBuilder;
@@ -41,7 +43,12 @@
import com.epam.dlab.dto.aws.computational.ClusterConfig;
import com.epam.dlab.dto.base.DataEngineType;
import com.epam.dlab.dto.base.computational.ComputationalBase;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.base.computational.FullComputationalTemplate;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
import com.epam.dlab.exceptions.DlabException;
import com.epam.dlab.exceptions.ResourceNotFoundException;
import com.epam.dlab.rest.client.RESTService;
@@ -51,12 +58,19 @@
import com.google.inject.name.Named;
import lombok.extern.slf4j.Slf4j;
+import java.util.Collection;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.stream.Collectors;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.FAILED;
+import static com.epam.dlab.dto.UserInstanceStatus.RECONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
import static com.epam.dlab.dto.base.DataEngineType.CLOUD_SERVICE;
import static com.epam.dlab.dto.base.DataEngineType.SPARK_STANDALONE;
import static com.epam.dlab.rest.contracts.ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC;
@@ -78,24 +92,54 @@
DATA_ENGINE_TYPE_TERMINATE_URLS.put(CLOUD_SERVICE, ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC);
}
- @Inject
- private ProjectService projectService;
- @Inject
- private ExploratoryDAO exploratoryDAO;
- @Inject
- private ComputationalDAO computationalDAO;
- @Inject
- @Named(ServiceConsts.PROVISIONING_SERVICE_NAME)
- private RESTService provisioningService;
- @Inject
- private RequestBuilder requestBuilder;
- @Inject
- private RequestId requestId;
- @Inject
- private TagService tagService;
- @Inject
- private EndpointService endpointService;
+ private final ProjectService projectService;
+ private final ExploratoryDAO exploratoryDAO;
+ private final ComputationalDAO computationalDAO;
+ private final RESTService provisioningService;
+ private final RequestBuilder requestBuilder;
+ private final RequestId requestId;
+ private final TagService tagService;
+ private final EndpointService endpointService;
+ private final InfrastructureTemplateService templateService;
+ @Inject
+ public ComputationalServiceImpl(ProjectService projectService, ExploratoryDAO exploratoryDAO, ComputationalDAO computationalDAO,
+ @Named(ServiceConsts.PROVISIONING_SERVICE_NAME) RESTService provisioningService,
+ RequestBuilder requestBuilder, RequestId requestId, TagService tagService,
+ EndpointService endpointService, InfrastructureTemplateService templateService) {
+ this.projectService = projectService;
+ this.exploratoryDAO = exploratoryDAO;
+ this.computationalDAO = computationalDAO;
+ this.provisioningService = provisioningService;
+ this.requestBuilder = requestBuilder;
+ this.requestId = requestId;
+ this.tagService = tagService;
+ this.endpointService = endpointService;
+ this.templateService = templateService;
+ }
+
+
+ @Override
+ public ComputationalTemplatesDTO getComputationalNamesAndTemplates(UserInfo user, String project, String endpoint) {
+ List<FullComputationalTemplate> computationalTemplates = templateService.getComputationalTemplates(user, project, endpoint);
+ List<UserInstanceDTO> userInstances = exploratoryDAO.fetchExploratoryFieldsForProjectWithComp(project);
+
+ List<String> projectComputations = userInstances
+ .stream()
+ .map(UserInstanceDTO::getResources)
+ .flatMap(Collection::stream)
+ .map(UserComputationalResource::getComputationalName)
+ .collect(Collectors.toList());
+ List<String> userComputations = userInstances
+ .stream()
+ .filter(instance -> instance.getUser().equalsIgnoreCase(user.getName()))
+ .map(UserInstanceDTO::getResources)
+ .flatMap(Collection::stream)
+ .map(UserComputationalResource::getComputationalName)
+ .collect(Collectors.toList());
+
+ return new ComputationalTemplatesDTO(computationalTemplates, userComputations, projectComputations);
+ }
@BudgetLimited
@Override
@@ -103,11 +147,11 @@
final ProjectDTO projectDTO = projectService.get(project);
final UserInstanceDTO instance =
- exploratoryDAO.fetchExploratoryFields(userInfo.getName(), form.getNotebookName());
+ exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, form.getNotebookName());
final SparkStandaloneClusterResource compResource = createInitialComputationalResource(form);
compResource.setTags(tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
form.getCustomTag()));
- if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), compResource)) {
+ if (computationalDAO.addComputational(userInfo.getName(), form.getNotebookName(), project, compResource)) {
try {
EndpointDTO endpointDTO = endpointService.get(instance.getEndpoint());
ComputationalBase<?> dto = requestBuilder.newComputationalCreate(userInfo, projectDTO, instance, form, endpointDTO);
@@ -119,7 +163,7 @@
return true;
} catch (RuntimeException e) {
try {
- updateComputationalStatus(userInfo.getName(), form.getNotebookName(), form.getName(), FAILED);
+ updateComputationalStatus(userInfo.getName(), project, form.getNotebookName(), form.getName(), FAILED);
} catch (DlabException d) {
log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, form.getName(), userInfo.getName(), d);
}
@@ -133,15 +177,15 @@
}
@Override
- public void terminateComputational(UserInfo userInfo, String exploratoryName, String computationalName) {
+ public void terminateComputational(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
try {
- updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, TERMINATING);
+ updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, TERMINATING);
- final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(),
+ final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project,
exploratoryName);
- UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo
- .getName(), exploratoryName, computationalName);
+ UserComputationalResource compResource = computationalDAO.fetchComputationalFields(userInfo.getName(), project,
+ exploratoryName, computationalName);
final DataEngineType dataEngineType = compResource.getDataEngineType();
EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
@@ -156,7 +200,7 @@
} catch (RuntimeException re) {
try {
- updateComputationalStatus(userInfo.getName(), exploratoryName, computationalName, FAILED);
+ updateComputationalStatus(userInfo.getName(), project, exploratoryName, computationalName, FAILED);
} catch (DlabException e) {
log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, computationalName, userInfo.getName(), e);
}
@@ -171,12 +215,12 @@
UserComputationalResource computationalResource, @Project String project) {
final ProjectDTO projectDTO = projectService.get(project);
- final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), formDTO
+ final UserInstanceDTO instance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, formDTO
.getNotebookName());
final Map<String, String> tags = tagService.getResourceTags(userInfo, instance.getEndpoint(), project,
formDTO.getCustomTag());
computationalResource.setTags(tags);
- boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(),
+ boolean isAdded = computationalDAO.addComputational(userInfo.getName(), formDTO.getNotebookName(), project,
computationalResource);
if (isAdded) {
@@ -191,8 +235,8 @@
return true;
} catch (Exception t) {
try {
- updateComputationalStatus(userInfo.getName(), formDTO.getNotebookName(), formDTO.getName(),
- FAILED);
+ updateComputationalStatus(userInfo.getName(), project, formDTO.getNotebookName(),
+ formDTO.getName(), FAILED);
} catch (DlabException e) {
log.error(COULD_NOT_UPDATE_THE_STATUS_MSG_FORMAT, formDTO.getName(), userInfo.getName(), e);
}
@@ -206,12 +250,12 @@
}
@Override
- public void stopSparkCluster(UserInfo userInfo, String expName, String compName) {
- final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+ public void stopSparkCluster(UserInfo userInfo, String project, String expName, String compName) {
+ final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
final UserInstanceStatus requiredStatus = UserInstanceStatus.RUNNING;
if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
log.debug("{} spark cluster {} for userInstance {}", STOPPING.toString(), compName, expName);
- updateComputationalStatus(userInfo.getName(), expName, compName, STOPPING);
+ updateComputationalStatus(userInfo.getName(), project, expName, compName, STOPPING);
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
final String uuid =
provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_STOP_SPARK,
@@ -230,11 +274,11 @@
@Override
public void startSparkCluster(UserInfo userInfo, String expName, String compName, @Project String project) {
final UserInstanceDTO userInstance =
- exploratoryDAO.fetchExploratoryFields(userInfo.getName(), expName, true);
+ exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, expName, true);
final UserInstanceStatus requiredStatus = UserInstanceStatus.STOPPED;
if (computationalWithStatusResourceExist(compName, userInstance, requiredStatus)) {
log.debug("{} spark cluster {} for userInstance {}", STARTING.toString(), compName, expName);
- updateComputationalStatus(userInfo.getName(), expName, compName, STARTING);
+ updateComputationalStatus(userInfo.getName(), project, expName, compName, STARTING);
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
final String uuid =
provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_START_SPARK,
@@ -249,12 +293,12 @@
}
@Override
- public void updateSparkClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName,
+ public void updateSparkClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName,
List<ClusterConfig> config) {
final String userName = userInfo.getName();
final String token = userInfo.getAccessToken();
final UserInstanceDTO userInstanceDTO = exploratoryDAO
- .fetchExploratoryFields(userName, exploratoryName, true);
+ .fetchExploratoryFields(userName, project, exploratoryName, true);
final UserComputationalResource compResource = userInstanceDTO
.getResources()
.stream()
@@ -269,6 +313,7 @@
provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_RECONFIGURE_SPARK,
token, clusterConfigDto, String.class);
computationalDAO.updateComputationalFields(new ComputationalStatusDTO()
+ .withProject(userInstanceDTO.getProject())
.withComputationalName(computationalName)
.withExploratoryName(exploratoryName)
.withConfig(config)
@@ -279,37 +324,19 @@
}
/**
- * Updates parameter 'reuploadKeyRequired' for corresponding user's computational resources with allowable statuses
- * which are affiliated with exploratories with theirs allowable statuses.
- *
- * @param user user.
- * @param exploratoryStatuses allowable exploratories' statuses.
- * @param computationalTypes type list of computational resource.
- * @param reuploadKeyRequired true/false.
- * @param computationalStatuses allowable statuses for computational resources.
- */
- @Override
- public void updateComputationalsReuploadKeyFlag(String user, List<UserInstanceStatus> exploratoryStatuses,
- List<DataEngineType> computationalTypes,
- boolean reuploadKeyRequired,
- UserInstanceStatus... computationalStatuses) {
- computationalDAO.updateReuploadKeyFlagForComputationalResources(user, exploratoryStatuses, computationalTypes,
- reuploadKeyRequired, computationalStatuses);
- }
-
- /**
* Returns computational resource's data by name for user's exploratory.
*
- * @param user user.
+ * @param user user
+ * @param project name of project
* @param exploratoryName name of exploratory.
* @param computationalName name of computational resource.
* @return corresponding computational resource's data or empty data if resource doesn't exist.
*/
@Override
- public Optional<UserComputationalResource> getComputationalResource(String user, String exploratoryName,
+ public Optional<UserComputationalResource> getComputationalResource(String user, String project, String exploratoryName,
String computationalName) {
try {
- return Optional.of(computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName));
+ return Optional.of(computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName));
} catch (DlabException e) {
log.warn("Computational resource {} affiliated with exploratory {} for user {} not found.",
computationalName, exploratoryName, user);
@@ -318,22 +345,24 @@
}
@Override
- public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String exploratoryName, String computationalName) {
- return computationalDAO.getClusterConfig(userInfo.getName(), exploratoryName, computationalName);
+ public List<ClusterConfig> getClusterConfig(UserInfo userInfo, String project, String exploratoryName, String computationalName) {
+ return computationalDAO.getClusterConfig(userInfo.getName(), project, exploratoryName, computationalName);
}
/**
* Updates the status of computational resource in database.
*
* @param user user name.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @param computationalName name of computational resource.
* @param status status
*/
- private void updateComputationalStatus(String user, String exploratoryName, String computationalName,
+ private void updateComputationalStatus(String user, String project, String exploratoryName, String computationalName,
UserInstanceStatus status) {
ComputationalStatusDTO computationalStatus = new ComputationalStatusDTO()
.withUser(user)
+ .withProject(project)
.withExploratoryName(exploratoryName)
.withComputationalName(computationalName)
.withStatus(status);
@@ -367,5 +396,4 @@
compResource.getDataEngineType() == SPARK_STANDALONE &&
compResource.getComputationalName().equals(computationalName);
}
-
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
index 83cebcf..f86fd72 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImpl.java
@@ -114,11 +114,11 @@
}
@Override
- public void stopEnvironment(UserInfo userInfo, String user) {
+ public void stopEnvironment(UserInfo userInfo, String user, String project) {
log.debug("Stopping environment for user {}", user);
checkState(user, "stop");
exploratoryDAO.fetchRunningExploratoryFields(user)
- .forEach(e -> stopExploratory(userInfo, user, e.getExploratoryName()));
+ .forEach(e -> stopExploratory(userInfo, user, project, e.getExploratoryName()));
}
@Override
@@ -143,24 +143,24 @@
}
@Override
- public void stopExploratory(UserInfo userInfo, String user, String exploratoryName) {
- exploratoryService.stop(new UserInfo(user, userInfo.getAccessToken()), exploratoryName);
+ public void stopExploratory(UserInfo userInfo, String user, String project, String exploratoryName) {
+ exploratoryService.stop(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
}
@Override
- public void stopComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName) {
- computationalService.stopSparkCluster(new UserInfo(user, userInfo.getAccessToken()),
- exploratoryName, computationalName);
+ public void stopComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName) {
+ computationalService.stopSparkCluster(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
+ computationalName);
}
@Override
- public void terminateExploratory(UserInfo userInfo, String user, String exploratoryName) {
- exploratoryService.terminate(new UserInfo(user, userInfo.getAccessToken()), exploratoryName);
+ public void terminateExploratory(UserInfo userInfo, String user, String project, String exploratoryName) {
+ exploratoryService.terminate(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName);
}
@Override
- public void terminateComputational(UserInfo userInfo, String user, String exploratoryName, String computationalName) {
- computationalService.terminateComputational(new UserInfo(user, userInfo.getAccessToken()), exploratoryName,
+ public void terminateComputational(UserInfo userInfo, String user, String project, String exploratoryName, String computationalName) {
+ computationalService.terminateComputational(new UserInfo(user, userInfo.getAccessToken()), project, exploratoryName,
computationalName);
}
@@ -183,7 +183,7 @@
private void stopNotebookWithServiceAccount(UserInstanceDTO instance) {
final UserInfo userInfo = securityService.getServiceAccountInfo(instance.getUser());
- exploratoryService.stop(userInfo, instance.getExploratoryName());
+ exploratoryService.stop(userInfo, instance.getProject(), instance.getExploratoryName());
}
private List<UserResourceInfo> getProjectEnv(ProjectDTO projectDTO, List<UserInstanceDTO> allInstances) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
index 17b8967..77d870b 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImpl.java
@@ -105,17 +105,17 @@
@BudgetLimited
@Override
public String start(UserInfo userInfo, String exploratoryName, @Project String project) {
- return action(userInfo, exploratoryName, EXPLORATORY_START, STARTING);
+ return action(userInfo, project, exploratoryName, EXPLORATORY_START, STARTING);
}
@Override
- public String stop(UserInfo userInfo, String exploratoryName) {
- return action(userInfo, exploratoryName, EXPLORATORY_STOP, STOPPING);
+ public String stop(UserInfo userInfo, String project, String exploratoryName) {
+ return action(userInfo, project, exploratoryName, EXPLORATORY_STOP, STOPPING);
}
@Override
- public String terminate(UserInfo userInfo, String exploratoryName) {
- return action(userInfo, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
+ public String terminate(UserInfo userInfo, String project, String exploratoryName) {
+ return action(userInfo, project, exploratoryName, EXPLORATORY_TERMINATE, TERMINATING);
}
@BudgetLimited
@@ -142,7 +142,7 @@
log.error("Could not update the status of exploratory environment {} with name {} for user {}",
exploratory.getDockerImage(), exploratory.getName(), userInfo.getName(), t);
if (isAdded) {
- updateExploratoryStatusSilent(userInfo.getName(), exploratory.getName(), FAILED);
+ updateExploratoryStatusSilent(userInfo.getName(), project, exploratory.getName(), FAILED);
}
throw new DlabException("Could not create exploratory environment " + exploratory.getName() + " for user "
+ userInfo.getName() + ": " + Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()), t);
@@ -150,53 +150,16 @@
}
@Override
- public void updateExploratoryStatuses(String user, UserInstanceStatus status) {
- exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(user, TERMINATED, FAILED)
- .forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, user));
- }
-
- @Override
public void updateProjectExploratoryStatuses(String project, String endpoint, UserInstanceStatus status) {
exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(project, endpoint, TERMINATED, FAILED)
- .forEach(ui -> updateExploratoryStatus(ui.getExploratoryName(), status, ui.getUser()));
- }
-
- /**
- * Updates parameter 'reuploadKeyRequired' for corresponding user's exploratories with allowable statuses.
- *
- * @param user user.
- * @param reuploadKeyRequired true/false.
- * @param exploratoryStatuses allowable exploratories' statuses.
- */
- @Override
- public void updateExploratoriesReuploadKeyFlag(String user, boolean reuploadKeyRequired,
- UserInstanceStatus... exploratoryStatuses) {
- exploratoryDAO.updateReuploadKeyForExploratories(user, reuploadKeyRequired, exploratoryStatuses);
- }
-
- /**
- * Returns list of user's exploratories and corresponding computational resources where both of them have
- * predefined statuses.
- *
- * @param user user.
- * @param exploratoryStatus status for exploratory environment.
- * @param computationalStatus status for computational resource affiliated with the exploratory.
- * @return list with user instances.
- */
- @Override
- public List<UserInstanceDTO> getInstancesWithStatuses(String user, UserInstanceStatus exploratoryStatus,
- UserInstanceStatus computationalStatus) {
- return getExploratoriesWithStatus(user, exploratoryStatus).stream()
- .map(e -> e.withResources(computationalResourcesWithStatus(e, computationalStatus)))
- .collect(Collectors.toList());
+ .forEach(ui -> updateExploratoryStatus(project, ui.getExploratoryName(), status, ui.getUser()));
}
@Override
- public void updateClusterConfig(UserInfo userInfo, String exploratoryName, List<ClusterConfig> config) {
+ public void updateClusterConfig(UserInfo userInfo, String project, String exploratoryName, List<ClusterConfig> config) {
final String userName = userInfo.getName();
final String token = userInfo.getAccessToken();
- final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName,
- exploratoryName);
+ final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchRunningExploratoryFields(userName, project, exploratoryName);
EndpointDTO endpointDTO = endpointService.get(userInstanceDTO.getEndpoint());
final ExploratoryReconfigureSparkClusterActionDTO updateClusterConfigDTO =
requestBuilder.newClusterConfigUpdate(userInfo, userInstanceDTO, config, endpointDTO);
@@ -206,6 +169,7 @@
requestId.put(userName, uuid);
exploratoryDAO.updateExploratoryFields(new ExploratoryStatusDTO()
.withUser(userName)
+ .withProject(project)
.withExploratoryName(exploratoryName)
.withConfig(config)
.withStatus(UserInstanceStatus.RECONFIGURING.toString()));
@@ -215,13 +179,14 @@
* Returns user instance's data by it's name.
*
* @param user user.
+ * @param project project name
* @param exploratoryName name of exploratory.
* @return corresponding user instance's data or empty data if resource doesn't exist.
*/
@Override
- public Optional<UserInstanceDTO> getUserInstance(String user, String exploratoryName) {
+ public Optional<UserInstanceDTO> getUserInstance(String user, String project, String exploratoryName) {
try {
- return Optional.of(exploratoryDAO.fetchExploratoryFields(user, exploratoryName));
+ return Optional.of(exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName));
} catch (DlabException e) {
log.warn("User instance with exploratory name {} for user {} not found.", exploratoryName, user);
}
@@ -229,8 +194,8 @@
}
@Override
- public List<ClusterConfig> getClusterConfig(UserInfo user, String exploratoryName) {
- return exploratoryDAO.getClusterConfig(user.getName(), exploratoryName);
+ public List<ClusterConfig> getClusterConfig(UserInfo user, String project, String exploratoryName) {
+ return exploratoryDAO.getClusterConfig(user.getName(), project, exploratoryName);
}
@Override
@@ -256,30 +221,20 @@
}
/**
- * Returns list of user's exploratories with predefined status.
- *
- * @param user user.
- * @param status status for exploratory environment.
- * @return list of user's instances.
- */
- private List<UserInstanceDTO> getExploratoriesWithStatus(String user, UserInstanceStatus status) {
- return exploratoryDAO.fetchUserExploratoriesWhereStatusIn(user, true, status);
- }
-
- /**
* Sends the post request to the provisioning service and update the status of exploratory environment.
*
* @param userInfo user info.
+ * @param project name of project
* @param exploratoryName name of exploratory environment.
* @param action action for exploratory environment.
* @param status status for exploratory environment.
* @return Invocation request as JSON string.
*/
- private String action(UserInfo userInfo, String exploratoryName, String action, UserInstanceStatus status) {
+ private String action(UserInfo userInfo, String project, String exploratoryName, String action, UserInstanceStatus status) {
try {
- updateExploratoryStatus(exploratoryName, status, userInfo.getName());
+ updateExploratoryStatus(project, exploratoryName, status, userInfo.getName());
- UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), exploratoryName);
+ UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(userInfo.getName(), project, exploratoryName);
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
final String uuid =
provisioningService.post(endpointDTO.getUrl() + action, userInfo.getAccessToken(),
@@ -289,7 +244,7 @@
} catch (Exception t) {
log.error("Could not {} exploratory environment {} for user {}",
StringUtils.substringAfter(action, "/"), exploratoryName, userInfo.getName(), t);
- updateExploratoryStatusSilent(userInfo.getName(), exploratoryName, FAILED);
+ updateExploratoryStatusSilent(userInfo.getName(), project, exploratoryName, FAILED);
final String errorMsg = String.format("Could not %s exploratory environment %s: %s",
StringUtils.substringAfter(action, "/"), exploratoryName,
Optional.ofNullable(t.getCause()).map(Throwable::getMessage).orElse(t.getMessage()));
@@ -297,15 +252,15 @@
}
}
- private void updateExploratoryStatus(String exploratoryName, UserInstanceStatus status, String user) {
- updateExploratoryStatus(user, exploratoryName, status);
+ private void updateExploratoryStatus(String project, String exploratoryName, UserInstanceStatus status, String user) {
+ updateExploratoryStatus(user, project, exploratoryName, status);
if (status == STOPPING) {
- updateComputationalStatuses(user, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
+ updateComputationalStatuses(user, project, exploratoryName, STOPPING, TERMINATING, FAILED, TERMINATED, STOPPED);
} else if (status == TERMINATING) {
- updateComputationalStatuses(user, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
+ updateComputationalStatuses(user, project, exploratoryName, TERMINATING, TERMINATING, TERMINATED, FAILED);
} else if (status == TERMINATED) {
- updateComputationalStatuses(user, exploratoryName, TERMINATED, TERMINATED, TERMINATED, FAILED);
+ updateComputationalStatuses(user, project, exploratoryName, TERMINATED, TERMINATED, TERMINATED, FAILED);
}
}
@@ -327,11 +282,12 @@
* Updates the status of exploratory environment.
*
* @param user user name
+ * @param project project name
* @param exploratoryName name of exploratory environment.
* @param status status for exploratory environment.
*/
- private void updateExploratoryStatus(String user, String exploratoryName, UserInstanceStatus status) {
- StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, exploratoryName, status);
+ private void updateExploratoryStatus(String user, String project, String exploratoryName, UserInstanceStatus status) {
+ StatusEnvBaseDTO<?> exploratoryStatus = createStatusDTO(user, project, exploratoryName, status);
exploratoryDAO.updateExploratoryStatus(exploratoryStatus);
}
@@ -339,36 +295,39 @@
* Updates the status of exploratory environment without exceptions. If exception occurred then logging it.
*
* @param user user name
+ * @param project project name
* @param exploratoryName name of exploratory environment.
* @param status status for exploratory environment.
*/
- private void updateExploratoryStatusSilent(String user, String exploratoryName, UserInstanceStatus status) {
+ private void updateExploratoryStatusSilent(String user, String project, String exploratoryName, UserInstanceStatus status) {
try {
- updateExploratoryStatus(user, exploratoryName, status);
+ updateExploratoryStatus(user, project, exploratoryName, status);
} catch (DlabException e) {
log.error("Could not update the status of exploratory environment {} for user {} to {}",
exploratoryName, user, status, e);
}
}
- private void updateComputationalStatuses(String user, String exploratoryName, UserInstanceStatus
+ private void updateComputationalStatuses(String user, String project, String exploratoryName, UserInstanceStatus
dataEngineStatus, UserInstanceStatus dataEngineServiceStatus, UserInstanceStatus... excludedStatuses) {
log.debug("updating status for all computational resources of {} for user {}: DataEngine {}, " +
"dataengine-service {}", exploratoryName, user, dataEngineStatus, dataEngineServiceStatus);
- computationalDAO.updateComputationalStatusesForExploratory(user, exploratoryName, dataEngineStatus,
- dataEngineServiceStatus, excludedStatuses);
+ computationalDAO.updateComputationalStatusesForExploratory(user, project, exploratoryName,
+ dataEngineStatus, dataEngineServiceStatus, excludedStatuses);
}
/**
* Instantiates and returns the descriptor of exploratory environment status.
*
* @param user user name
+	 * @param project         project name.
* @param exploratoryName name of exploratory environment.
* @param status status for exploratory environment.
*/
- private StatusEnvBaseDTO<?> createStatusDTO(String user, String exploratoryName, UserInstanceStatus status) {
+ private StatusEnvBaseDTO<?> createStatusDTO(String user, String project, String exploratoryName, UserInstanceStatus status) {
return new ExploratoryStatusDTO()
.withUser(user)
+ .withProject(project)
.withExploratoryName(exploratoryName)
.withStatus(status);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
index bd00f38..5cb3a64 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImpl.java
@@ -24,9 +24,11 @@
import com.epam.dlab.backendapi.dao.ExploratoryLibDAO;
import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
import com.epam.dlab.backendapi.service.EndpointService;
import com.epam.dlab.backendapi.service.ImageExploratoryService;
+import com.epam.dlab.backendapi.service.ProjectService;
import com.epam.dlab.backendapi.util.RequestBuilder;
import com.epam.dlab.constants.ServiceConsts;
import com.epam.dlab.dto.UserInstanceDTO;
@@ -71,17 +73,19 @@
private RequestBuilder requestBuilder;
@Inject
private EndpointService endpointService;
+ @Inject
+ private ProjectService projectService;
@Override
- public String createImage(UserInfo user, String exploratoryName, String imageName, String imageDescription) {
-
- UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), exploratoryName);
+ public String createImage(UserInfo user, String project, String exploratoryName, String imageName, String imageDescription) {
+ ProjectDTO projectDTO = projectService.get(project);
+ UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(user.getName(), project, exploratoryName);
if (imageExploratoryDao.exist(imageName, userInstance.getProject())) {
log.error(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
throw new ResourceAlreadyExistException(String.format(IMAGE_EXISTS_MSG, imageName, userInstance.getProject()));
}
- final List<Library> libraries = libDAO.getLibraries(user.getName(), exploratoryName);
+ final List<Library> libraries = libDAO.getLibraries(user.getName(), project, exploratoryName);
imageExploratoryDao.save(Image.builder()
.name(imageName)
@@ -98,13 +102,14 @@
exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
.withUser(user.getName())
+ .withProject(project)
.withExploratoryName(exploratoryName)
.withStatus(UserInstanceStatus.CREATING_IMAGE));
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
return provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_IMAGE,
user.getAccessToken(),
- requestBuilder.newExploratoryImageCreate(user, userInstance, imageName, endpointDTO), String.class);
+ requestBuilder.newExploratoryImageCreate(user, userInstance, imageName, endpointDTO, projectDTO), String.class);
}
@Override
@@ -113,13 +118,14 @@
exploratoryName, image.getUser());
exploratoryDAO.updateExploratoryStatus(new ExploratoryStatusDTO()
.withUser(image.getUser())
+ .withProject(image.getProject())
.withExploratoryName(exploratoryName)
.withStatus(UserInstanceStatus.RUNNING));
imageExploratoryDao.updateImageFields(image);
if (newNotebookIp != null) {
log.debug("Changing exploratory ip with name {} for user {} to {}", exploratoryName, image.getUser(),
newNotebookIp);
- exploratoryDAO.updateExploratoryIp(image.getUser(), newNotebookIp, exploratoryName);
+ exploratoryDAO.updateExploratoryIp(image.getUser(), image.getProject(), newNotebookIp, exploratoryName);
}
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
index 1d3230f..dd370dd 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/InactivityServiceImpl.java
@@ -75,9 +75,9 @@
}
@Override
- public void updateLastActivityForComputational(UserInfo userInfo, String exploratoryName,
+ public void updateLastActivityForComputational(UserInfo userInfo, String project, String exploratoryName,
String computationalName, LocalDateTime lastActivity) {
- computationalDAO.updateLastActivity(userInfo.getName(), exploratoryName, computationalName, lastActivity);
+ computationalDAO.updateLastActivity(userInfo.getName(), project, exploratoryName, computationalName, lastActivity);
}
private void updateLastActivity(UserInstanceDTO ui) {
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
index 3d65a15..3fbb170 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImpl.java
@@ -50,7 +50,13 @@
import org.apache.commons.lang3.StringUtils;
import org.bson.Document;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeMap;
import java.util.stream.Collectors;
@Slf4j
@@ -80,13 +86,13 @@
@Override
@SuppressWarnings("unchecked")
- public List<Document> getLibs(String user, String exploratoryName, String computationalName) {
+ public List<Document> getLibs(String user, String project, String exploratoryName, String computationalName) {
if (StringUtils.isEmpty(computationalName)) {
- return (List<Document>) libraryDAO.findExploratoryLibraries(user, exploratoryName)
+ return (List<Document>) libraryDAO.findExploratoryLibraries(user, project, exploratoryName)
.getOrDefault(ExploratoryLibDAO.EXPLORATORY_LIBS, new ArrayList<>());
} else {
- Document document = (Document) libraryDAO.findComputationalLibraries(user, exploratoryName,
- computationalName)
+ Document document = (Document) libraryDAO.findComputationalLibraries(user, project,
+ exploratoryName, computationalName)
.getOrDefault(ExploratoryLibDAO.COMPUTATIONAL_LIBS, new Document());
return (List<Document>) document.getOrDefault(computationalName, new ArrayList<>());
@@ -95,8 +101,8 @@
@Override
@SuppressWarnings("unchecked")
- public List<LibInfoRecord> getLibInfo(String user, String exploratoryName) {
- Document document = libraryDAO.findAllLibraries(user, exploratoryName);
+ public List<LibInfoRecord> getLibInfo(String user, String project, String exploratoryName) {
+ Document document = libraryDAO.findAllLibraries(user, project, exploratoryName);
Map<LibKey, List<LibraryStatus>> model = new TreeMap<>(Comparator.comparing(LibKey::getName)
.thenComparing(LibKey::getVersion)
@@ -124,52 +130,52 @@
}
@Override
- public String installComputationalLibs(UserInfo ui, String expName, String compName,
+ public String installComputationalLibs(UserInfo ui, String project, String expName, String compName,
List<LibInstallDTO> libs) {
- final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), expName, compName);
+ final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(ui.getName(), project, expName, compName);
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
final String uuid =
provisioningService.post(endpointDTO.getUrl() + ComputationalAPI.COMPUTATIONAL_LIB_INSTALL,
ui.getAccessToken(),
- toComputationalLibraryInstallDto(ui, expName, compName, libs, userInstance, endpointDTO),
+ toComputationalLibraryInstallDto(ui, project, expName, compName, libs, userInstance, endpointDTO),
String.class);
requestId.put(ui.getName(), uuid);
return uuid;
}
@Override
- public String installExploratoryLibs(UserInfo ui, String expName, List<LibInstallDTO> libs) {
- final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), expName);
+ public String installExploratoryLibs(UserInfo ui, String project, String expName, List<LibInstallDTO> libs) {
+ final UserInstanceDTO userInstance = exploratoryDAO.fetchRunningExploratoryFields(ui.getName(), project, expName);
EndpointDTO endpointDTO = endpointService.get(userInstance.getEndpoint());
final String uuid =
provisioningService.post(endpointDTO.getUrl() + ExploratoryAPI.EXPLORATORY_LIB_INSTALL,
- ui.getAccessToken(), toExploratoryLibraryInstallDto(ui, expName, libs, userInstance, endpointDTO),
+ ui.getAccessToken(), toExploratoryLibraryInstallDto(ui, project, expName, libs, userInstance, endpointDTO),
String.class);
requestId.put(ui.getName(), uuid);
return uuid;
}
- private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String exploratoryName,
+ private LibraryInstallDTO toExploratoryLibraryInstallDto(UserInfo userInfo, String project, String exploratoryName,
List<LibInstallDTO> libs, UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
final List<LibInstallDTO> libsToInstall = libs.stream()
- .map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), exploratoryName,
+ .map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project, exploratoryName,
lib.getGroup(), lib.getName())))
- .peek(l -> libraryDAO.addLibrary(userInfo.getName(), exploratoryName, l, l.isOverride()))
+ .peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, exploratoryName, l, l.isOverride()))
.collect(Collectors.toList());
return requestBuilder.newLibInstall(userInfo, userInstance, endpointDTO, libsToInstall);
}
- private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String expName, String compName,
- List<LibInstallDTO> libs,
+ private LibraryInstallDTO toComputationalLibraryInstallDto(UserInfo userInfo, String project, String expName,
+ String compName, List<LibInstallDTO> libs,
UserInstanceDTO userInstance, EndpointDTO endpointDTO) {
final UserComputationalResource computationalResource = getComputationalResource(compName, userInstance);
final List<LibInstallDTO> libsToInstall = libs.stream()
- .map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), expName,
- compName, lib.getGroup(), lib.getName())))
- .peek(l -> libraryDAO.addLibrary(userInfo.getName(), expName, compName, l,
- l.isOverride()))
+ .map(lib -> toLibInstallDto(lib, libraryDAO.getLibrary(userInfo.getName(), project,
+ expName, compName, lib.getGroup(), lib.getName())))
+ .peek(l -> libraryDAO.addLibrary(userInfo.getName(), project, expName, compName,
+ l, l.isOverride()))
.collect(Collectors.toList());
return requestBuilder.newLibInstall(userInfo, userInstance, computationalResource, libsToInstall, endpointDTO);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
index dcf9699..ca4effa 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ProjectServiceImpl.java
@@ -83,6 +83,7 @@
public List<ProjectManagingDTO> getProjectsForManaging() {
return projectDAO.getProjects().stream().map(p -> new ProjectManagingDTO(
p.getName(), p.getBudget(), isCanBeStopped(p), isCanBeTerminated(p)))
+ .filter(projectManagingDTO -> projectManagingDTO.isCanBeTerminated())
.collect(Collectors.toList());
}
@@ -172,15 +173,23 @@
}
@Override
- public void stopWithResources(UserInfo userInfo, String projectName) {
- List<ProjectEndpointDTO> endpoints = get(projectName).getEndpoints();
- checkProjectRelatedResourcesInProgress(projectName, endpoints, STOP_ACTION);
+ public void stopWithResources(UserInfo userInfo, List<String> endpoints, String projectName) {
+ List<ProjectEndpointDTO> endpointDTOs = get(projectName)
+ .getEndpoints()
+ .stream()
+ .filter(projectEndpointDTO -> endpoints.contains(projectEndpointDTO.getName()))
+ .collect(Collectors.toList());
+ checkProjectRelatedResourcesInProgress(projectName, endpointDTOs, STOP_ACTION);
- exploratoryDAO.fetchRunningExploratoryFieldsForProject(projectName).forEach(e ->
- exploratoryService.stop(new UserInfo(e.getUser(), userInfo.getAccessToken()), e.getExploratoryName()));
+ exploratoryDAO.fetchRunningExploratoryFieldsForProject(projectName,
+ endpointDTOs
+ .stream()
+ .map(ProjectEndpointDTO::getName)
+ .collect(Collectors.toList()))
+ .forEach(e -> exploratoryService.stop(new UserInfo(e.getUser(), userInfo.getAccessToken()), projectName, e.getExploratoryName()));
- endpoints.stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
- UserInstanceStatus.TERMINATING, UserInstanceStatus.STOPPED).contains(e.getStatus()))
+ endpointDTOs.stream().filter(e -> !Arrays.asList(UserInstanceStatus.TERMINATED,
+ UserInstanceStatus.TERMINATING, UserInstanceStatus.STOPPED, UserInstanceStatus.FAILED).contains(e.getStatus()))
.forEach(e -> stop(userInfo, e.getName(), projectName));
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
index 0efe7f6..e75d9df 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImpl.java
@@ -19,7 +19,6 @@
package com.epam.dlab.backendapi.service.impl;
-import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.dao.ComputationalDAO;
import com.epam.dlab.backendapi.dao.ExploratoryDAO;
import com.epam.dlab.backendapi.domain.RequestId;
@@ -27,11 +26,8 @@
import com.epam.dlab.backendapi.service.ReuploadKeyService;
import com.epam.dlab.backendapi.util.RequestBuilder;
import com.epam.dlab.dto.UserInstanceStatus;
-import com.epam.dlab.dto.base.DataEngineType;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
import com.epam.dlab.model.ResourceData;
import com.epam.dlab.model.ResourceType;
import com.epam.dlab.rest.client.RESTService;
@@ -39,16 +35,9 @@
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.UUID;
import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static com.epam.dlab.rest.contracts.KeyAPI.REUPLOAD_KEY;
@Singleton
@Slf4j
@@ -75,34 +64,33 @@
@Override
public void updateResourceData(ReuploadKeyStatusDTO dto) {
- String user = dto.getUser();
- ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
- log.debug("Updating resource {} to status RUNNING...", resource.toString());
- updateResourceStatus(user, resource, RUNNING);
- if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
- log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
- updateResourceReuploadKeyFlag(user, resource, false);
- } else {
- log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
- }
- }
+ String user = dto.getUser();
+ ResourceData resource = dto.getReuploadKeyCallbackDTO().getResource();
+ log.debug("Updating resource {} to status RUNNING...", resource.toString());
+ updateResourceStatus(user, null, resource, RUNNING);
+ if (dto.getReuploadKeyStatus() == ReuploadKeyStatus.COMPLETED) {
+ log.debug(REUPLOAD_KEY_UPDATE_MSG, resource.toString());
+ updateResourceReuploadKeyFlag(user, null, resource, false);
+ } else {
+ log.error(REUPLOAD_KEY_ERROR_MSG, resource.toString());
+ }
+ }
- private void updateResourceStatus(String user, ResourceData resourceData, UserInstanceStatus newStatus) {
- if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
- exploratoryDAO.updateStatusForExploratory(user, resourceData.getExploratoryName(), newStatus);
- } else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
- computationalDAO.updateStatusForComputationalResource(user, resourceData.getExploratoryName(),
- resourceData.getComputationalName(), newStatus);
- }
- }
+ private void updateResourceStatus(String user, String project, ResourceData resourceData, UserInstanceStatus newStatus) {
+ if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+ exploratoryDAO.updateStatusForExploratory(user, project, resourceData.getExploratoryName(), newStatus);
+ } else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+ computationalDAO.updateStatusForComputationalResource(user, project,
+ resourceData.getExploratoryName(), resourceData.getComputationalName(), newStatus);
+ }
+ }
- private void updateResourceReuploadKeyFlag(String user, ResourceData resourceData, boolean reuploadKeyRequired) {
- if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
- exploratoryDAO.updateReuploadKeyForExploratory(user, resourceData.getExploratoryName(),
- reuploadKeyRequired);
- } else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
- computationalDAO.updateReuploadKeyFlagForComputationalResource(user, resourceData.getExploratoryName(),
- resourceData.getComputationalName(), reuploadKeyRequired);
- }
- }
+ private void updateResourceReuploadKeyFlag(String user, String project, ResourceData resourceData, boolean reuploadKeyRequired) {
+ if (resourceData.getResourceType() == ResourceType.EXPLORATORY) {
+ exploratoryDAO.updateReuploadKeyForExploratory(user, project, resourceData.getExploratoryName(), reuploadKeyRequired);
+ } else if (resourceData.getResourceType() == ResourceType.COMPUTATIONAL) {
+ computationalDAO.updateReuploadKeyFlagForComputationalResource(user, project,
+ resourceData.getExploratoryName(), resourceData.getComputationalName(), reuploadKeyRequired);
+ }
+ }
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
index e84908e..cb7b1c1 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImpl.java
@@ -44,7 +44,12 @@
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Date;
import java.util.List;
@@ -55,7 +60,13 @@
import java.util.stream.Stream;
import static com.epam.dlab.constants.ServiceConsts.PROVISIONING_SERVICE_NAME;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CONFIGURING;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
+import static com.epam.dlab.dto.UserInstanceStatus.TERMINATING;
import static com.epam.dlab.dto.base.DataEngineType.getDockerImageName;
import static java.time.ZoneId.systemDefault;
import static java.util.Collections.singletonList;
@@ -98,44 +109,44 @@
private RESTService provisioningService;
@Override
- public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String exploratoryName) {
- return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, exploratoryName)
+ public SchedulerJobDTO fetchSchedulerJobForUserAndExploratory(String user, String project, String exploratoryName) {
+ return schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(user, project, exploratoryName)
.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
exploratoryName)));
}
@Override
- public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String exploratoryName,
+ public SchedulerJobDTO fetchSchedulerJobForComputationalResource(String user, String project, String exploratoryName,
String computationalName) {
- return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, exploratoryName, computationalName)
+ return schedulerJobDAO.fetchSingleSchedulerJobForCluster(user, project, exploratoryName, computationalName)
.orElseThrow(() -> new ResourceNotFoundException(String.format(SCHEDULER_NOT_FOUND_MSG, user,
exploratoryName) + " with computational resource " + computationalName));
}
@Override
- public void updateExploratorySchedulerData(String user, String exploratoryName, SchedulerJobDTO dto) {
- validateExploratoryStatus(user, exploratoryName);
+ public void updateExploratorySchedulerData(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+ validateExploratoryStatus(user, project, exploratoryName);
populateDefaultSchedulerValues(dto);
log.debug("Updating exploratory {} for user {} with new scheduler job data: {}...", exploratoryName, user,
dto);
- exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, exploratoryName, dto);
+ exploratoryDAO.updateSchedulerDataForUserAndExploratory(user, project, exploratoryName, dto);
if (!dto.inactivityScheduler() && dto.isSyncStartRequired()) {
- shareSchedulerJobDataToSparkClusters(user, exploratoryName, dto);
+ shareSchedulerJobDataToSparkClusters(user, project, exploratoryName, dto);
} else if (!dto.inactivityScheduler()) {
- computationalDAO.updateSchedulerSyncFlag(user, exploratoryName, dto.isSyncStartRequired());
+ computationalDAO.updateSchedulerSyncFlag(user, project, exploratoryName, dto.isSyncStartRequired());
}
}
@Override
- public void updateComputationalSchedulerData(String user, String exploratoryName, String computationalName,
+ public void updateComputationalSchedulerData(String user, String project, String exploratoryName, String computationalName,
SchedulerJobDTO dto) {
- validateExploratoryStatus(user, exploratoryName);
- validateComputationalStatus(user, exploratoryName, computationalName);
+ validateExploratoryStatus(user, project, exploratoryName);
+ validateComputationalStatus(user, project, exploratoryName, computationalName);
populateDefaultSchedulerValues(dto);
log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new scheduler " +
"job data {}...", computationalName, exploratoryName, user, dto);
- computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, computationalName, dto);
+ computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName, computationalName, dto);
}
@Override
@@ -203,11 +214,12 @@
}
private void stopComputational(SchedulerJobData job) {
+ final String project = job.getProject();
final String expName = job.getExploratoryName();
final String compName = job.getComputationalName();
final String user = job.getUser();
log.debug("Stopping exploratory {} computational {} for user {} by scheduler", expName, compName, user);
- computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), expName, compName);
+ computationalService.stopSparkCluster(securityService.getServiceAccountInfo(user), project, expName, compName);
}
private void terminateComputational(SchedulerJobData job) {
@@ -216,14 +228,15 @@
final String compName = job.getComputationalName();
final UserInfo userInfo = securityService.getServiceAccountInfo(user);
log.debug("Terminating exploratory {} computational {} for user {} by scheduler", expName, compName, user);
- computationalService.terminateComputational(userInfo, expName, compName);
+ computationalService.terminateComputational(userInfo, job.getProject(), expName, compName);
}
private void stopExploratory(SchedulerJobData job) {
final String expName = job.getExploratoryName();
final String user = job.getUser();
+ final String project = job.getProject();
log.debug("Stopping exploratory {} for user {} by scheduler", expName, user);
- exploratoryService.stop(securityService.getServiceAccountInfo(user), expName);
+ exploratoryService.stop(securityService.getServiceAccountInfo(user), project, expName);
}
private List<SchedulerJobData> getExploratorySchedulersForTerminating(OffsetDateTime now) {
@@ -250,7 +263,7 @@
log.trace("Starting computational for exploratory {} for user {} by scheduler", exploratoryName, user);
final DataEngineType sparkCluster = DataEngineType.SPARK_STANDALONE;
final List<UserComputationalResource> compToBeStarted =
- computationalDAO.findComputationalResourcesWithStatus(user, exploratoryName, STOPPED);
+ computationalDAO.findComputationalResourcesWithStatus(user, project, exploratoryName, STOPPED);
compToBeStarted
.stream()
@@ -261,9 +274,10 @@
private void terminateExploratory(SchedulerJobData job) {
final String user = job.getUser();
+ final String project = job.getProject();
final String expName = job.getExploratoryName();
log.debug("Terminating exploratory {} for user {} by scheduler", expName, user);
- exploratoryService.terminate(securityService.getUserInfoOffline(user), expName);
+ exploratoryService.terminate(securityService.getUserInfoOffline(user), project, expName);
}
private void startSpark(String user, String expName, String compName, String project) {
@@ -282,19 +296,20 @@
* performed automatically with notebook stopping since Spark resources have such feature).
*
* @param user user's name
+ * @param project project name
* @param exploratoryName name of exploratory resource
* @param dto scheduler job data.
*/
- private void shareSchedulerJobDataToSparkClusters(String user, String exploratoryName, SchedulerJobDTO dto) {
- List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user,
- singletonList(DataEngineType.SPARK_STANDALONE), exploratoryName,
- STARTING, RUNNING, STOPPING, STOPPED);
+ private void shareSchedulerJobDataToSparkClusters(String user, String project, String exploratoryName, SchedulerJobDTO dto) {
+ List<String> correspondingSparkClusters = computationalDAO.getComputationalResourcesWhereStatusIn(user, project,
+ singletonList(DataEngineType.SPARK_STANDALONE),
+ exploratoryName, STARTING, RUNNING, STOPPING, STOPPED);
SchedulerJobDTO dtoWithoutStopData = getSchedulerJobWithoutStopData(dto);
for (String sparkName : correspondingSparkClusters) {
log.debug("Updating computational resource {} affiliated with exploratory {} for user {} with new " +
"scheduler job data {}...", sparkName, exploratoryName, user, dtoWithoutStopData);
- computationalDAO.updateSchedulerDataForComputationalResource(user, exploratoryName, sparkName,
- dtoWithoutStopData);
+ computationalDAO.updateSchedulerDataForComputationalResource(user, project, exploratoryName,
+ sparkName, dtoWithoutStopData);
}
}
@@ -367,10 +382,11 @@
}
private boolean computationalInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+ final String projectName = schedulerJobData.getProject();
final String explName = schedulerJobData.getExploratoryName();
final String compName = schedulerJobData.getComputationalName();
final String user = schedulerJobData.getUser();
- final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, explName, compName);
+ final UserComputationalResource c = computationalDAO.fetchComputationalFields(user, projectName, explName, compName);
final Long maxInactivity = schedulerData.getMaxInactivity();
return inactivityCondition(maxInactivity, c.getStatus(), c.getLastActivity());
}
@@ -381,9 +397,10 @@
}
private boolean exploratoryInactivityExceed(SchedulerJobData schedulerJobData, SchedulerJobDTO schedulerData) {
+ final String project = schedulerJobData.getProject();
final String expName = schedulerJobData.getExploratoryName();
final String user = schedulerJobData.getUser();
- final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, expName, true);
+ final UserInstanceDTO userInstanceDTO = exploratoryDAO.fetchExploratoryFields(user, project, expName, true);
final boolean canBeStopped = userInstanceDTO.getResources()
.stream()
.map(UserComputationalResource::getStatus)
@@ -409,14 +426,14 @@
}
}
- private void validateExploratoryStatus(String user, String exploratoryName) {
- final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, exploratoryName);
+ private void validateExploratoryStatus(String user, String project, String exploratoryName) {
+ final UserInstanceDTO userInstance = exploratoryDAO.fetchExploratoryFields(user, project, exploratoryName);
validateResourceStatus(userInstance.getStatus());
}
- private void validateComputationalStatus(String user, String exploratoryName, String computationalName) {
+ private void validateComputationalStatus(String user, String project, String exploratoryName, String computationalName) {
final UserComputationalResource computationalResource =
- computationalDAO.fetchComputationalFields(user, exploratoryName, computationalName);
+ computationalDAO.fetchComputationalFields(user, project, exploratoryName, computationalName);
final String computationalStatus = computationalResource.getStatus();
validateResourceStatus(computationalStatus);
}
diff --git a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
index 4d2eaaf..8e9f3d8 100644
--- a/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
+++ b/services/self-service/src/main/java/com/epam/dlab/backendapi/util/RequestBuilder.java
@@ -303,6 +303,7 @@
return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryActionDTO.class)
.withNotebookInstanceName(userInstance.getExploratoryId())
.withProject(userInstance.getProject())
+ .withEndpoint(endpointDTO.getName())
.withNotebookImage(userInstance.getImageName())
.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()))
.withExploratoryName(userInstance.getExploratoryName());
@@ -335,6 +336,7 @@
return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), LibListComputationalDTO.class)
.withComputationalId(computationalResource.getComputationalId())
.withProject(userInstance.getProject())
+ .withEndpoint(endpointDTO.getName())
.withComputationalImage(computationalResource.getImageName())
.withLibCacheKey(ExploratoryLibCache.libraryCacheKey(userInstance))
.withApplicationName(getApplicationNameFromImage(userInstance.getImageName()));
@@ -493,7 +495,8 @@
.withComputationalName(computationalName)
.withNotebookInstanceName(exploratory.getExploratoryId())
.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
- .withProject(exploratory.getProject());
+ .withProject(exploratory.getProject())
+ .withEndpoint(endpointDTO.getName());
}
@SuppressWarnings("unchecked")
@@ -504,12 +507,13 @@
.withComputationalName(computationalName)
.withNotebookInstanceName(exploratory.getExploratoryId())
.withApplicationName(getApplicationNameFromImage(exploratory.getImageName()))
- .withProject(exploratory.getProject());
+ .withProject(exploratory.getProject())
+ .withEndpoint(endpointDTO.getName());
}
@SuppressWarnings("unchecked")
public <T extends ExploratoryImageDTO> T newExploratoryImageCreate(UserInfo userInfo, UserInstanceDTO userInstance,
- String imageName, EndpointDTO endpointDTO) {
+ String imageName, EndpointDTO endpointDTO, ProjectDTO projectDTO) {
checkInappropriateCloudProviderOrElseThrowException(endpointDTO.getCloudProvider());
return (T) newResourceSysBaseDTO(userInfo, endpointDTO.getCloudProvider(), ExploratoryImageDTO.class)
.withProject(userInstance.getProject())
@@ -519,7 +523,8 @@
.withNotebookImage(userInstance.getImageName())
.withImageName(imageName)
.withEndpoint(userInstance.getEndpoint())
- .withTags(userInstance.getTags());
+ .withTags(userInstance.getTags())
+ .withSharedImageEnabled(String.valueOf(projectDTO.isSharedImageEnabled()));
}
@SuppressWarnings("unchecked")
@@ -534,7 +539,8 @@
.withNotebookImageName(exploratory.getImageName())
.withImage(cr.getImageName())
.withComputationalId(cr.getComputationalId())
- .withProject(exploratory.getProject());
+ .withProject(exploratory.getProject())
+ .withEndpoint(endpointDTO.getName());
}
@@ -604,7 +610,8 @@
.withNotebookImage(userInstance.getImageName())
.withExploratoryName(userInstance.getExploratoryName())
.withReuploadKeyRequired(userInstance.isReuploadKeyRequired())
- .withProject(userInstance.getProject());
+ .withProject(userInstance.getProject())
+ .withEndpoint(endpointDTO.getName());
return dto;
}
diff --git a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
index 00d4821..2adb28c 100644
--- a/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/aws/mongo_roles.json
@@ -2,6 +2,8 @@
{
"_id": "nbShapes_p2.xlarge_fetching",
"description": "Use p2.xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"p2.xlarge"
],
@@ -12,6 +14,8 @@
{
"_id": "nbShapes_t2.medium_fetching",
"description": "Use t2.medium instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"t2.medium"
],
@@ -22,6 +26,8 @@
{
"_id": "nbShapes_r3.xlarge_fetching",
"description": "Use r3.xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"r3.xlarge"
],
@@ -32,6 +38,8 @@
{
"_id": "nbShapes_r4.2xlarge_fetching",
"description": "Use r4.2xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"r4.2xlarge"
],
@@ -42,6 +50,8 @@
{
"_id": "nbShapes_r3.4xlarge_fetching",
"description": "Use r3.4xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"r3.4xlarge"
],
@@ -52,6 +62,8 @@
{
"_id": "nbShapes_r3.8xlarge_fetching",
"description": "Use r3.8xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"r3.8xlarge"
],
@@ -62,6 +74,8 @@
{
"_id": "nbShapes_c4.large_fetching",
"description": "Use c4.large instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"c4.large"
],
@@ -72,6 +86,8 @@
{
"_id": "nbShapes_c4.2xlarge_fetching",
"description": "Use c4.2xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"c4.2xlarge"
],
@@ -82,6 +98,8 @@
{
"_id": "nbShapes_c4.8xlarge_fetching",
"description": "Use c4.8xlarge instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AWS",
"exploratory_shapes": [
"c4.8xlarge"
],
@@ -92,6 +110,8 @@
{
"_id": "nbCreateDeeplearning",
"description": "Create Notebook Deep Learning",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-deeplearning"
],
@@ -102,6 +122,8 @@
{
"_id": "nbCreateJupyter",
"description": "Create Notebook Jupyter",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-jupyter"
],
@@ -112,6 +134,8 @@
{
"_id": "nbCreateJupyterLab",
"description": "Create Notebook JupyterLab",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-jupyterlab"
],
@@ -122,6 +146,8 @@
{
"_id": "nbCreateRstudio",
"description": "Create Notebook RStudio",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-rstudio"
],
@@ -132,6 +158,8 @@
{
"_id": "nbCreateTensor",
"description": "Create Notebook Jupyter with TensorFlow",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-tensor"
],
@@ -142,6 +170,8 @@
{
"_id": "nbCreateZeppelin",
"description": "Create Notebook Apache Zeppelin",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-zeppelin"
],
@@ -152,6 +182,8 @@
{
"_id": "nbCreateTensorRstudio",
"description": "Create Notebook RStudio with TensorFlow",
+ "type": "NOTEBOOK",
+ "cloud": "AWS",
"exploratories": [
"docker.dlab-tensor-rstudio"
],
@@ -162,6 +194,8 @@
{
"_id": "nbCreateDataEngine",
"description": "Create Data Engine",
+ "type": "COMPUTATIONAL",
+ "cloud": "AWS",
"computationals": [
"docker.dlab-dataengine"
],
@@ -172,6 +206,8 @@
{
"_id": "nbCreateDataEngineService",
"description": "Create Data Engine Service",
+ "type": "COMPUTATIONAL",
+ "cloud": "AWS",
"computationals": [
"docker.dlab-dataengine-service"
],
@@ -182,6 +218,8 @@
{
"_id": "compShapes_c4.xlarge_fetching",
"description": "Use c4.xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"c4.xlarge"
],
@@ -192,6 +230,8 @@
{
"_id": "compShapes_r3.xlarge_fetching",
"description": "Use r3.xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"r3.xlarge"
],
@@ -202,6 +242,8 @@
{
"_id": "compShapes_r4.2xlarge_fetching",
"description": "Use r4.2xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"r4.2xlarge"
],
@@ -212,6 +254,8 @@
{
"_id": "compShapes_r3.4xlarge_fetching",
"description": "Use r3.4xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"r3.4xlarge"
],
@@ -222,6 +266,8 @@
{
"_id": "compShapes_r3.8xlarge_fetching",
"description": "Use r3.8xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"r3.8xlarge"
],
@@ -232,6 +278,8 @@
{
"_id": "compShapes_c4.2xlarge_fetching",
"description": "Use c4.2xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"c4.2xlarge"
],
@@ -242,6 +290,8 @@
{
"_id": "compShapes_c4.8xlarge_fetching",
"description": "Use c4.8xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"c4.8xlarge"
],
@@ -252,6 +302,8 @@
{
"_id": "compShapes_p2.xlarge_fetching",
"description": "Use p2.xlarge instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AWS",
"computational_shapes": [
"p2.xlarge"
],
@@ -262,6 +314,8 @@
{
"_id": "nbBillingReportFull",
"description": "View full billing report for all users",
+ "type": "BILLING",
+ "cloud": "AWS",
"pages": [
"/api/infrastructure_provision/billing"
],
@@ -272,6 +326,8 @@
{
"_id": "admin",
"description": "Allow to execute administration operation",
+ "type": "ADMINISTRATION",
+ "cloud": "AWS",
"pages": [
"environment/*",
"/api/infrastructure/backup",
diff --git a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
index b0a4a0a..bbd7789 100644
--- a/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/azure/mongo_roles.json
@@ -1,7 +1,9 @@
[
{
"_id": "nbShapes_Standard_NC6_fetching",
- "description": "Allow to use Standard_NC6 instance shape for notebook",
+ "description": "Use Standard_NC6 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_NC6"
],
@@ -12,6 +14,8 @@
{
"_id": "nbShapes_Standard_E4s_v3_fetching",
"description": "Use Standard_E4s_v3 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_E4s_v3"
],
@@ -22,6 +26,8 @@
{
"_id": "nbShapes_Standard_E16s_v3_fetching",
"description": "Use Standard_E16s_v3 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_E16s_v3"
],
@@ -32,6 +38,8 @@
{
"_id": "nbShapes_Standard_E32s_v3_fetching",
"description": "Use Standard_E32s_v3 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_E32s_v3"
],
@@ -42,6 +50,8 @@
{
"_id": "nbShapes_Standard_F2s_fetching",
"description": "Use Standard_F2s instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_F2s"
],
@@ -52,6 +62,8 @@
{
"_id": "nbShapes_Standard_F4s_fetching",
"description": "Use Standard_F4s instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_F4s"
],
@@ -62,6 +74,8 @@
{
"_id": "nbShapes_Standard_F8s_fetching",
"description": "Use Standard_F8s instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_F8s"
],
@@ -72,6 +86,8 @@
{
"_id": "nbShapes_Standard_F16s_fetching",
"description": "Use Standard_F16s instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "AZURE",
"exploratory_shapes": [
"Standard_F16s"
],
@@ -82,6 +98,8 @@
{
"_id": "nbCreateDeeplearning",
"description": "Create Notebook Deep Learning",
+ "type": "NOTEBOOK",
+ "cloud": "AZURE",
"exploratories": [
"docker.dlab-deeplearning"
],
@@ -92,6 +110,8 @@
{
"_id": "nbCreateJupyter",
"description": "Create Notebook Jupyter",
+ "type": "NOTEBOOK",
+ "cloud": "AZURE",
"exploratories": [
"docker.dlab-jupyter"
],
@@ -102,6 +122,8 @@
{
"_id": "nbCreateRstudio",
"description": "Create Notebook RStudio",
+ "type": "NOTEBOOK",
+ "cloud": "AZURE",
"exploratories": [
"docker.dlab-rstudio"
],
@@ -112,6 +134,8 @@
{
"_id": "nbCreateTensor",
"description": "Create Notebook Jupyter with TensorFlow",
+ "type": "NOTEBOOK",
+ "cloud": "AZURE",
"exploratories": [
"docker.dlab-tensor"
],
@@ -122,6 +146,8 @@
{
"_id": "nbCreateZeppelin",
"description": "Create Notebook Apache Zeppelin",
+ "type": "NOTEBOOK",
+ "cloud": "AZURE",
"exploratories": [
"docker.dlab-zeppelin"
],
@@ -132,6 +158,8 @@
{
"_id": "nbCreateDataEngine",
"description": "Create Data Engine",
+ "type": "COMPUTATIONAL",
+ "cloud": "AZURE",
"computationals": [
"docker.dlab-dataengine"
],
@@ -142,6 +170,8 @@
{
"_id": "compShapes_Standard_F4s_fetching",
"description": "Use Standard_F4s instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_F4s"
],
@@ -152,6 +182,8 @@
{
"_id": "compShapes_Standard_E4s_v3_fetching",
"description": "Use Standard_E4s_v3 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_E4s_v3"
],
@@ -162,6 +194,8 @@
{
"_id": "compShapes_Standard_E16s_v3_fetching",
"description": "Use Standard_E16s_v3 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_E16s_v3"
],
@@ -172,6 +206,8 @@
{
"_id": "compShapes_Standard_E32s_v3_fetching",
"description": "Use Standard_E32s_v3 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_E32s_v3"
],
@@ -182,6 +218,8 @@
{
"_id": "compShapes_Standard_F8s_fetching",
"description": "Use Standard_F8s instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_F8s"
],
@@ -192,6 +230,8 @@
{
"_id": "compShapes_Standard_F16s_fetching",
"description": "Use Standard_F16s instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_F16s"
],
@@ -202,6 +242,8 @@
{
"_id": "compShapes_Standard_NC6_fetching",
"description": "Use Standard_NC6 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "AZURE",
"computational_shapes": [
"Standard_NC6"
],
@@ -212,6 +254,8 @@
{
"_id": "nbBillingReportFull",
"description": "View full billing report for all users",
+ "type": "BILLING",
+ "cloud": "AZURE",
"pages": [
"/api/infrastructure_provision/billing"
],
@@ -222,6 +266,8 @@
{
"_id": "admin",
"description": "Allow to execute administration operation",
+ "type": "ADMINISTRATION",
+ "cloud": "AZURE",
"pages": [
"environment/*",
"/api/infrastructure/backup",
diff --git a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
index 67548bf..d55e85f 100644
--- a/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
+++ b/services/self-service/src/main/resources/mongo/gcp/mongo_roles.json
@@ -2,6 +2,8 @@
{
"_id": "nbShapes_n1-highcpu-2_fetching",
"description": "Use n1-highcpu-2 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highcpu-2"
],
@@ -12,6 +14,8 @@
{
"_id": "nbShapes_n1-highcpu-8_fetching",
"description": "Use n1-highcpu-8 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highcpu-8"
],
@@ -22,6 +26,8 @@
{
"_id": "nbShapes_n1-highcpu-32_fetching",
"description": "Use n1-highcpu-32 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highcpu-32"
],
@@ -32,6 +38,8 @@
{
"_id": "nbShapes_n1-highmem-4_fetching",
"description": "Use n1-highmem-4 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highmem-4"
],
@@ -42,6 +50,8 @@
{
"_id": "nbShapes_n1-highmem-16_fetching",
"description": "Use n1-highmem-16 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highmem-16"
],
@@ -52,6 +62,8 @@
{
"_id": "nbShapes_n1-highmem-32_fetching",
"description": "Use n1-highmem-32 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-highmem-32"
],
@@ -62,6 +74,8 @@
{
"_id": "nbShapes_n1-standard-2_fetching",
"description": "Use n1-standard-2 instance shape for notebook",
+ "type": "NOTEBOOK_SHAPE",
+ "cloud": "GCP",
"exploratory_shapes": [
"n1-standard-2"
],
@@ -72,6 +86,8 @@
{
"_id": "nbCreateDeeplearning",
"description": "Create Notebook Deep Learning",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-deeplearning"
],
@@ -82,6 +98,8 @@
{
"_id": "nbCreateJupyter",
"description": "Create Notebook Jupyter",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-jupyter"
],
@@ -92,6 +110,8 @@
{
"_id": "nbCreateJupyterLab",
"description": "Create Notebook JupyterLab",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-jupyterlab"
],
@@ -102,6 +122,8 @@
{
"_id": "nbCreateSuperset",
"description": "Create Notebook Superset",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-superset"
],
@@ -112,6 +134,8 @@
{
"_id": "nbCreateRstudio",
"description": "Create Notebook RStudio",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-rstudio"
],
@@ -122,6 +146,8 @@
{
"_id": "nbCreateTensor",
"description": "Create Notebook Jupyter with TensorFlow",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-tensor"
],
@@ -132,6 +158,8 @@
{
"_id": "nbCreateTensorRstudio",
"description": "Create Notebook RStudio with TensorFlow",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-tensor-rstudio"
],
@@ -142,6 +170,8 @@
{
"_id": "nbCreateZeppelin",
"description": "Create Notebook Apache Zeppelin",
+ "type": "NOTEBOOK",
+ "cloud": "GCP",
"exploratories": [
"docker.dlab-zeppelin"
],
@@ -152,6 +182,8 @@
{
"_id": "nbCreateDataEngine",
"description": "Create Data Engine",
+ "type": "COMPUTATIONAL",
+ "cloud": "GCP",
"computationals": [
"docker.dlab-dataengine"
],
@@ -162,6 +194,8 @@
{
"_id": "nbCreateDataEngineService",
"description": "Create Data Engine Service",
+ "type": "COMPUTATIONAL",
+ "cloud": "GCP",
"computationals": [
"docker.dlab-dataengine-service"
],
@@ -172,6 +206,8 @@
{
"_id": "compShapes_n1-standard-2_fetching",
"description": "Use n1-standard-2 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-standard-2"
],
@@ -182,6 +218,8 @@
{
"_id": "compShapes_n1-highmem-4_fetching",
"description": "Use n1-highmem-4 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highmem-4"
],
@@ -192,6 +230,8 @@
{
"_id": "compShapes_n1-highmem-16_fetching",
"description": "Use n1-highmem-16 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highmem-16"
],
@@ -202,6 +242,8 @@
{
"_id": "compShapes_n1-highmem-32_fetching",
"description": "Use n1-highmem-32 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highmem-32"
],
@@ -212,6 +254,8 @@
{
"_id": "compShapes_n1-highcpu-8_fetching",
"description": "Use n1-highcpu-8 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highcpu-8"
],
@@ -222,6 +266,8 @@
{
"_id": "compShapes_n1-highcpu-2_fetching",
"description": "Use n1-highcpu-2 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highcpu-2"
],
@@ -232,6 +278,8 @@
{
"_id": "compShapes_n1-highcpu-32_fetching",
"description": "Use n1-highcpu-32 instance shape for cluster",
+ "type": "COMPUTATIONAL_SHAPE",
+ "cloud": "GCP",
"computational_shapes": [
"n1-highcpu-32"
],
@@ -242,6 +290,8 @@
{
"_id": "nbBillingReportFull",
"description": "View full billing report for all users",
+ "type": "BILLING",
+ "cloud": "GCP",
"pages": [
"/api/infrastructure_provision/billing"
],
@@ -252,6 +302,8 @@
{
"_id": "admin",
"description": "Allow to execute administration operation",
+ "type": "ADMINISTRATION",
+ "cloud": "GCP",
"pages": [
"environment/*",
"/api/infrastructure/backup",
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
index d29a5d0..45c6a23 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/endpoints/endpoints.component.ts
@@ -25,7 +25,7 @@
import { EndpointService } from '../../../core/services';
import { NotificationDialogComponent } from '../../../shared/modal-dialog/notification-dialog';
import { PATTERNS } from '../../../core/util';
-import { map } from "rxjs/operators";
+import { map } from 'rxjs/operators';
export interface Endpoint {
name: string;
@@ -76,9 +76,14 @@
project.name,
resource.exploratories.filter(notebook => notebook.project === project.name),
project.endpoints.filter(endpoint => endpoint.name === data.name)[0].status))
- .filter(project => project.nodeStatus !== "TERMINATED" && project.nodeStatus !== "TERMINATING" && project.nodeStatus !== "FAILED")))
+ .filter(project => project.nodeStatus !== 'TERMINATED'
+ && project.nodeStatus !== 'TERMINATING'
+ && project.nodeStatus !== 'FAILED'
+ )))
.subscribe((resource: any) => {
- this.dialog.open(NotificationDialogComponent, { data: { type: 'confirmation', item: data, list: resource }, panelClass: 'modal-sm' })
+ this.dialog.open(NotificationDialogComponent, { data: {
+ type: 'confirmation', item: data, list: resource
+ }, panelClass: 'modal-sm' })
.afterClosed().subscribe(result => {
result === 'noTerminate' && this.deleteEndpointOption(data, false);
result === 'terminate' && this.deleteEndpointOption(data, true);
@@ -86,13 +91,13 @@
});
}
- public getEndpoinConnectionStatus(url){
- let getStatus = this.endpointService.getEndpoinConnectionStatus(encodeURIComponent(url));
+ public getEndpoinConnectionStatus(url) {
+ const getStatus = this.endpointService.getEndpoinConnectionStatus(encodeURIComponent(url));
this.dialog.open(EndpointTestResultDialogComponent, { data: {url: url, getStatus}, panelClass: 'modal-sm' });
}
private static createResourceList(name: string, resource: Array<any>, nodeStatus: string): Object {
- return {name, resource, nodeStatus}
+ return {name, resource, nodeStatus};
}
private initFormModel(): void {
@@ -104,26 +109,26 @@
});
}
- private deleteEndpointOption(data, option): void{
+ private deleteEndpointOption(data, option): void {
this.endpointService.deleteEndpoint(`${data.name}?with-resources=${option}`).subscribe(() => {
this.toastr.success(option ? 'Endpoint successfully disconnected. All related resources are terminating!' : 'Endpoint successfully disconnected!' , 'Success!');
this.getEndpointList();
}, error => this.toastr.error(error.message || 'Endpoint creation failed!', 'Oops!'));
}
- private getEndpointList() : void{
+ private getEndpointList(): void {
this.endpointService.getEndpointsData().subscribe((endpoints: any) => this.endpoints = endpoints);
}
private validateUrl(control) {
- if (control && control.value){
+ if (control && control.value) {
const isDublicat = this.endpoints.some(endpoint => endpoint['url'].toLocaleLowerCase() === control.value.toLowerCase());
return isDublicat ? { isDuplicate: true } : null;
}
}
private validateName(control) {
- if (control && control.value){
+ if (control && control.value) {
const isDublicat = this.endpoints.some(endpoint => endpoint['name'].toLocaleLowerCase() === control.value.toLowerCase());
return isDublicat ? { isDuplicate: true } : null;
}
@@ -212,14 +217,14 @@
this.response = true;
return;
},
- ()=> {
+ () => {
this.isConnected = false;
this.response = true;
return;
- })
+ });
}
private cutToLongUrl(url) {
- return url.length > 25 ? url.slice(0,25) + '...' : url
+ return url.length > 25 ? url.slice(0, 25) + '...' : url;
}
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
index ba8e6f7..74ff5af 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/manage-environment/manage-environment-dilog.component.html
@@ -19,7 +19,7 @@
<div id="dialog-box" class="manage-env-dialog">
<header class="dialog-header">
- <h4 class="modal-title">Manage environment</h4>
+ <h4 class="modal-title">Manage DLab quotas</h4>
<button type="button" class="close" (click)="dialogRef.close()">×</button>
</header>
<div class="dialog-content">
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
index 1508e56..1945cd0 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.html
@@ -77,7 +77,8 @@
<span [hidden]="filtering && filterForm.statuses.length > 0 && !collapsedFilterRow">more_vert</span>
</i>
</button> </th>
- <td mat-cell *matCellDef="let element" class="ani status" ngClass="{{element.status || ''}}">{{ element.status }}
+ <td mat-cell *matCellDef="let element" class="ani status" >
+ <span ngClass="{{element.status || ''}}">{{ element.status }}</span>
</td>
</ng-container>
@@ -120,8 +121,10 @@
</td>
</ng-container>
- <ng-container matColumnDef="actions">
- <th mat-header-cell *matHeaderCellDef class="actions"></th>
+ <ng-container matColumnDef="actions">
+ <th mat-header-cell *matHeaderCellDef class="actions">
+ <span class="label"> Actions </span>
+ </th>
<td mat-cell *matCellDef="let element" class=" settings actions-col">
<span #settings class="actions" (click)="actions.toggle($event, settings)" *ngIf="element.type !== 'edge node' && element.type !== 'odahu'"
[ngClass]="{
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
index 87bef73..6c52559 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management-grid/management-grid.component.scss
@@ -50,13 +50,17 @@
.settings {
padding-right: 14px;
- .actions {
- margin-top: 2px;
+
+ }
+ .actions {
+ margin-top: 0px;
+ .label{
+ padding-right: 5px;
}
}
-
.actions-col {
width: 6%;
+
}
.dashboard_table_body {
@@ -86,19 +90,21 @@
height: auto;
.label {
display: inline-block;
- padding-top: 10px;
+ padding-top: 14px;
vertical-align: super !important;
padding-left: 5px;
- font-size: 11px;
+ font-size: 12px;
+ }
+ .actions {
+ text-align: right;
+ .label {
+ display: inline-block;
+ padding-top: 11px;
+ }
}
}
.filter-row {
background: inherit;
}
-
- .actions {
- text-align: right;
- }
-
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
index 7f2d728..4c4bdae 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.html
@@ -27,7 +27,7 @@
<i class="material-icons"></i>SSN Monitor
</button> -->
<button mat-raised-button class="butt env" (click)="openManageEnvironmentDialog()">
- <i class="material-icons"></i>Manage environment
+ <i class="material-icons"></i>Manage DLab quotas
</button>
<!-- <button mat-raised-button class="butt" (click)="showBackupDialog()" [disabled]="creatingBackup">
<i class="material-icons">backup</i>Backup
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
index da2122b..477b872 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/management/management.component.ts
@@ -48,7 +48,7 @@
export class ManagementComponent implements OnInit {
public user: string = '';
public healthStatus: GeneralEnvironmentStatus;
- public anyEnvInProgress: boolean = false;
+ // public anyEnvInProgress: boolean = false;
public dialogRef: any;
constructor(
@@ -78,6 +78,7 @@
.environmentManagement(
$event.environment.user,
$event.action,
+ $event.environment.project,
$event.environment.type === 'edge node' ? 'edge' : $event.environment.name,
$event.resource ? $event.resource.computational_name : null
).subscribe(
@@ -85,9 +86,9 @@
error => this.toastr.error('Environment management failed!', 'Oops!'));
}
- showBackupDialog() {
- this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
- }
+ // showBackupDialog() {
+ // this.dialog.open(BackupDilogComponent, { panelClass: 'modal-sm' });
+ // }
showEndpointsDialog() {
this.dialog.open(EndpointsComponent, { panelClass: 'modal-xl-s' })
@@ -104,16 +105,16 @@
});
}
- openSsnMonitorDialog() {
- this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
- }
-
- isEnvironmentsInProgress(exploratory): boolean {
- return exploratory.some(item => {
- return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
- el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
- });
- }
+ // openSsnMonitorDialog() {
+ // this.dialog.open(SsnMonitorComponent, { panelClass: 'modal-lg' });
+ // }
+ //
+ // isEnvironmentsInProgress(exploratory): boolean {
+ // return exploratory.some(item => {
+ // return item.exploratory.some(el => el.status === 'creating' || el.status === 'starting' ||
+ // el.resources.some(elem => elem.status === 'creating' || elem.status === 'starting' || elem.status === 'configuring'));
+ // });
+ // }
setBudgetLimits($event) {
this.projectService.updateProjectsBudget($event.projects).subscribe((result: any) => {
@@ -136,37 +137,37 @@
// .subscribe(() => this.handleSuccessAction(event.action), error => this.toastr.error(error.message, 'Oops!'));
// }
- handleSuccessAction(action) {
- this.toastr.success(`Action ${action} is processing!`, 'Processing!');
- this.projectService.getProjectsManagingList().subscribe(data => {
- this.dialogRef.componentInstance.data.projectsList = data;
- this.dialogRef.componentInstance.setProjectsControl();
- });
- this.buildGrid();
- }
+ // handleSuccessAction(action) {
+ // this.toastr.success(`Action ${action} is processing!`, 'Processing!');
+ // this.projectService.getProjectsManagingList().subscribe(data => {
+ // this.dialogRef.componentInstance.data.projectsList = data;
+ // this.dialogRef.componentInstance.setProjectsControl();
+ // });
+ // this.buildGrid();
+ // }
+ //
+ // get creatingBackup(): boolean {
+ // return this.backupService.inProgress;
+ // }
- get creatingBackup(): boolean {
- return this.backupService.inProgress;
- }
-
- private getExploratoryList() {
- this.userResourceService.getUserProvisionedResources()
- .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
- ExploratoryModel.loadEnvironments(result)));
- }
+ // private getExploratoryList() {
+ // this.userResourceService.getUserProvisionedResources()
+ // .subscribe((result) => this.anyEnvInProgress = this.isEnvironmentsInProgress(
+ // ExploratoryModel.loadEnvironments(result)));
+ // }
private getEnvironmentHealthStatus() {
this.healthStatusService
.getEnvironmentStatuses()
.subscribe((status: GeneralEnvironmentStatus) => {
this.healthStatus = status;
- this.getExploratoryList();
+ // this.getExploratoryList();
});
}
- private getActiveUsersList() {
- return this.healthStatusService.getActiveUsers();
- }
+ // private getActiveUsersList() {
+ // return this.healthStatusService.getActiveUsers();
+ // }
private getTotalBudgetData() {
return this.healthStatusService.getTotalBudgetData();
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
index e21990d..014c89b 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-data.service.ts
@@ -26,9 +26,12 @@
@Injectable()
export class ProjectDataService {
- _projects = new BehaviorSubject<any>(null);
- endpointsList: any;
- constructor(private projectService: ProjectService, private endpointService: EndpointService) {
+ public _projects = new BehaviorSubject<any>(null);
+ private endpointsList: any = [];
+ constructor(
+ private projectService: ProjectService,
+ private endpointService: EndpointService
+ ) {
this.getProjectsList();
}
@@ -41,7 +44,7 @@
this.projectService.getProjectsList()
.pipe(
mergeMap ((response: Project[]) => {
- if (response) {
+ if (response && this.endpointsList.length) {
response.forEach(project => project.endpoints.forEach(endpoint => {
const filtredEndpoints = this.endpointsList.filter(v => v.name === endpoint.name);
if (filtredEndpoints.length) {
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
index a2fc6cf..d8f697f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.html
@@ -24,7 +24,7 @@
</ng-container>
<ng-container matColumnDef="groups">
- <th mat-header-cell *matHeaderCellDef class="groups"> Groups </th>
+ <th mat-header-cell *matHeaderCellDef class="groups"> Group </th>
<td mat-cell *matCellDef="let element" class="groups">
<mat-chip-list>
<mat-chip *ngFor="let group of element.groups">{{ group }}</mat-chip>
@@ -63,7 +63,9 @@
</ng-container>
<ng-container matColumnDef="actions">
- <th mat-header-cell *matHeaderCellDef class="project-actions"></th>
+ <th mat-header-cell *matHeaderCellDef class="project-actions">
+ <span class="label"> Actions </span>
+ </th>
<td mat-cell *matCellDef="let element" class="settings">
<span #settings (click)="actions.toggle($event, settings)" class="actions"></span>
<bubble-up #actions class="list-menu" position="bottom-left" alternative="top-left">
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
index 73b725d..efe9ba3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project-list/project-list.component.scss
@@ -79,7 +79,13 @@
vertical-align: top;
padding: 10px 24px;
- span {
+ &.mat-header-cell{
+ padding-top: 19px;
+ padding-right: 13px;
+ color: rgba(0,0,0,.54);
+ }
+
+ span:not(.mat-header-cell span) {
transition: all .5s ease-in-out;
cursor: pointer;
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
index b4ba7df..ab69bdb 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.html
@@ -47,8 +47,7 @@
</mat-card>
<div [hidden]="!projectList.length">
- <project-list (editItem)="editProject($event)" (deleteItem)="deleteProject($event)"
- (toggleStatus)="toggleStatus($event)">
+ <project-list (editItem)="editProject($event)" (toggleStatus)="toggleStatus($event)">
</project-list>
</div>
</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
index ba3d45a..9833a40 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/project/project.component.ts
@@ -23,9 +23,10 @@
import { ToastrService } from 'ngx-toastr';
import { ProjectDataService } from './project-data.service';
-import { HealthStatusService, ProjectService } from '../../core/services';
+import {HealthStatusService, ProjectService, UserResourceService} from '../../core/services';
import { NotificationDialogComponent } from '../../shared/modal-dialog/notification-dialog';
import { ProjectListComponent } from './project-list/project-list.component';
+import {ExploratoryModel} from '../../resources/resources-grid/resources-grid.model';
export interface Endpoint {
name: string;
@@ -50,6 +51,7 @@
projectList: Project[] = [];
healthStatus: any;
activeFiltering: boolean = false;
+ resources: any = [];
private subscriptions: Subscription = new Subscription();
@@ -60,7 +62,8 @@
public toastr: ToastrService,
private projectService: ProjectService,
private projectDataService: ProjectDataService,
- private healthStatusService: HealthStatusService
+ private healthStatusService: HealthStatusService,
+ private userResourceService: UserResourceService
) { }
ngOnInit() {
@@ -70,12 +73,20 @@
if (value) this.projectList = value;
}));
this.refreshGrid();
+ this.getResources();
}
ngOnDestroy() {
this.subscriptions.unsubscribe();
}
+ private getResources() {
+ this.userResourceService.getUserProvisionedResources()
+ .subscribe((result: any) => {
+ this.resources = ExploratoryModel.loadEnvironments(result);
+ });
+ }
+
refreshGrid() {
this.projectDataService.updateProjects();
this.activeFiltering = false;
@@ -110,6 +121,28 @@
}
private toggleStatusRequest(data, action) {
+ if ( action === 'terminate') {
+ const projectsResources = this.resources
+ .filter(resource => resource.project === data.project_name )[0].exploratory
+ .filter(expl => expl.status !== 'terminated' && expl.status !== 'terminating');
+
+ let termResources = [];
+ data.endpoint.forEach(v => {
+ termResources = [...termResources, ...projectsResources.filter(resource => resource.endpoint === v)];
+ });
+
+ this.dialog.open(NotificationDialogComponent, { data: {
+ type: 'terminateNode', item: {action: data, resources: termResources.map(resource => resource.name)}
+ }, panelClass: 'modal-sm' })
+ .afterClosed().subscribe(result => {
+ result && this.edgeNodeAction(data, action);
+ });
+ } else {
+ this.edgeNodeAction(data, action);
+ }
+ }
+
+ private edgeNodeAction(data, action) {
this.projectService.toggleProjectStatus(data, action).subscribe(() => {
this.refreshGrid();
this.toastr.success(`Edge node ${this.toEndpointAction(action)} is in progress!`, 'Processing!');
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
index af111c2..d5496ac 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.html
@@ -45,6 +45,7 @@
class="material-icons">keyboard_arrow_right</i></button>
</div>
</mat-step>
+
<mat-step [completed]='false'>
<ng-template matStepLabel>Users</ng-template>
<div class="inner-step mat-reset">
@@ -58,35 +59,17 @@
class="material-icons">keyboard_arrow_right</i></button>
</div>
</mat-step>
+
<mat-step [completed]='false'>
<ng-template matStepLabel>Roles</ng-template>
<div class="inner-step mat-reset roles">
<div class="selector-wrapper">
- <mat-form-field>
- <mat-select
- multiple [compareWith]="compareObjects"
- name="roles"
- [(value)]="setupRoles"
- disableOptionCentering
- placeholder="Select roles"
- panelClass="select-role"
- >
- <mat-option class="multiple-select" disabled>
- <a class="select ani" (click)="selectAllOptions(setupRoles, rolesList)">
- <i class="material-icons">playlist_add_check</i> All
- </a>
- <a class="deselect ani" (click)="selectAllOptions(setupRoles)">
- <i class="material-icons">clear</i> None
- </a>
- </mat-option>
- <mat-option *ngFor="let role of rolesList" [value]="role">
- {{ role }}
- </mat-option>
- </mat-select>
- <button class="caret">
- <i class="material-icons">keyboard_arrow_down</i>
- </button>
- </mat-form-field>
+ <multi-level-select-dropdown
+ (selectionChange)="onUpdate($event)"
+ name="roles"
+ [items]="rolesList"
+ [model]="setupRoles">
+ </multi-level-select-dropdown>
</div>
</div>
<div class="text-center m-bott-10">
@@ -94,9 +77,10 @@
class="material-icons">keyboard_arrow_left</i>Back</button>
<button mat-raised-button (click)="resetDialog()" class="butt">Cancel</button>
<button mat-raised-button (click)="manageAction('create', 'group')" class="butt butt-success"
- [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length > 0">Create</button>
+ [disabled]="!setupGroup || setupGroupName.errors?.patterns || setupGroupName.errors?.duplicate || !setupRoles.length">Create</button>
</div>
</mat-step>
+
</mat-horizontal-stepper>
</mat-card>
<mat-divider></mat-divider>
@@ -112,27 +96,13 @@
<th mat-header-cell *matHeaderCellDef class="roles"> Roles </th>
<td mat-cell *matCellDef="let element" class="roles">
<div class="inner-step mat-reset">
- <div class="selector-wrapper-edit">
- <mat-form-field class="select">
- <mat-select multiple [compareWith]="compareObjects" name="selected_roles" disableOptionCentering
- [(value)]="element.selected_roles" placeholder="Select roles" class="roles-select" panelClass="select-role">
- <mat-option class="multiple-select" disabled>
- <a class="select ani" (click)="selectAllOptions(element, rolesList, 'selected_roles')">
- <i class="material-icons">playlist_add_check</i> All
- </a>
- <a class="deselect ani" (click)="selectAllOptions(element, null, 'selected_roles')">
- <i class="material-icons">clear</i> None
- </a>
- </mat-option>
- <mat-option *ngFor="let role of rolesList" [value]="role">
- {{ role }}
- </mat-option>
- </mat-select>
- <button class="caret">
- <i class="material-icons">keyboard_arrow_down</i>
- </button>
- </mat-form-field>
- </div>
+ <multi-level-select-dropdown
+ (selectionChange)="onUpdate($event)"
+ [type]="element.group"
+ [items]="rolesList"
+ [model]="element.selected_roles">
+
+ </multi-level-select-dropdown>
</div>
</td>
</ng-container>
@@ -161,15 +131,17 @@
<ng-container matColumnDef="actions">
<th mat-header-cell *matHeaderCellDef class="actions"></th>
<td mat-cell *matCellDef="let element" class="actions">
- <span (click)="manageAction('delete', 'group', element)" class="reset ani">
- <mat-icon>delete_forever</mat-icon>
- </span>
- <span class="apply ani" matTooltip="Group cannot be updated without any selected role"
- matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
- [ngClass]="{ 'not-allowed' : !element.selected_roles.length }"
- (click)="manageAction('update', 'group', element)">
- <mat-icon>done</mat-icon>
- </span>
+ <div class="actions-wrapper">
+ <span (click)="manageAction('delete', 'group', element)" class="reset ani">
+ <mat-icon>delete_forever</mat-icon>
+ </span>
+ <span class="apply ani big-icon" matTooltip="Group cannot be updated without any selected role"
+ matTooltipPosition="above" [matTooltipDisabled]="element.selected_roles.length > 0"
+ [ngClass]="{ 'not-allowed' : !element.selected_roles.length || isGroupChanded(element)}"
+ (click)="manageAction('update', 'group', element)">
+ <mat-icon [ngClass]="{'big-icon': !isGroupChanded(element) && element.selected_roles.length}">done</mat-icon>
+ </span>
+ </div>
</td>
</ng-container>
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
index dd14655..66b1898 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.scss
@@ -88,17 +88,23 @@
}
}
+.mat-horizontal-content-container{
+ overflow: visible !important;
+}
+
.selector-wrapper {
display: flex;
align-self: center;
width: 490px;
height: 36px;
- padding-left: 10px;
+ padding-left: 0;
font-family: 'Open Sans', sans-serif;
font-size: 15px;
font-weight: 300;
box-shadow: 0 3px 1px -2px rgba(0, 0, 0, 0.2), 0 2px 2px 0 rgba(0, 0, 0, 0.14), 0 1px 5px 0 rgba(0, 0, 0, 0.12);
-
+ multi-level-select-dropdown{
+ width: 100%;
+ }
mat-form-field {
width: 100%;
@@ -137,7 +143,6 @@
}
.roles {
- // width: 30%;
.selector-wrapper-edit {
position: relative;
@@ -343,25 +348,35 @@
}
.roles {
- width: 30%;
+ width: 35%;
}
.users {
- width: 40%;
+ width: 35%;
}
.actions {
color: #607d8b;
width: 10%;
text-align: center;
-
+ .actions-wrapper{
+ height: 41px;
+ display: flex;
+ align-items: center;
+ justify-content: flex-end;
+ }
span {
- transition: all .5s ease-in-out;
+ transition: all .35s ease-in-out;
cursor: pointer;
.mat-icon {
font-size: 18px;
padding-top: 12px;
+ &.big-icon{
+ font-size: 25px;
+ padding-top: 10px;
+ transition: .25s;
+ }
}
&:hover {
diff --git a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
index cf2f086..19ab0ed 100644
--- a/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/administration/roles/roles.component.ts
@@ -25,7 +25,10 @@
import { RolesGroupsService, HealthStatusService } from '../../core/services';
import { CheckUtils } from '../../core/util';
import { DICTIONARY } from '../../../dictionary/global.dictionary';
-import {ProgressBarService} from "../../core/services/progress-bar.service";
+import {ProgressBarService} from '../../core/services/progress-bar.service';
+import {ConfirmationDialogComponent, ConfirmationDialogType} from '../../shared/modal-dialog/confirmation-dialog';
+import {logger} from 'codelyzer/util/logger';
+
@Component({
selector: 'dlab-roles',
@@ -37,11 +40,11 @@
public groupsData: Array<any> = [];
public roles: Array<any> = [];
- public rolesList: Array<string> = [];
+ public rolesList: Array<any> = [];
public setupGroup: string = '';
public setupUser: string = '';
public manageUser: string = '';
- public setupRoles: Array<string> = [];
+ public setupRoles: Array<any> = [];
public updatedRoles: Array<string> = [];
public healthStatus: any;
public delimitersRegex = /[-_]?/g;
@@ -50,6 +53,7 @@
stepperView: boolean = false;
displayedColumns: string[] = ['name', 'roles', 'users', 'actions'];
@Output() manageRolesGroupAction: EventEmitter<{}> = new EventEmitter();
+ private startedGroups;
constructor(
public toastr: ToastrService,
@@ -65,18 +69,22 @@
}
openManageRolesDialog() {
- setTimeout(() => {this.progressBarService.startProgressBar()} , 0);
+ setTimeout(() => {this.progressBarService.startProgressBar(); } , 0);
this.rolesService.getGroupsData().subscribe(groups => {
this.rolesService.getRolesData().subscribe(
(roles: any) => {
this.roles = roles;
- this.rolesList = roles.map(role => role.description);
+ this.rolesList = roles.map((role, index) => {
+ return {role: role.description, type: role.type, cloud: role.cloud};
+ });
+ this.rolesList = this.rolesList.sort((a, b) => (a.cloud > b.cloud) ? 1 : ((b.cloud > a.cloud) ? -1 : 0));
+ this.rolesList = this.rolesList.sort((a, b) => (a.type > b.type) ? 1 : ((b.type > a.type) ? -1 : 0));
this.updateGroupData(groups);
this.stepperView = false;
},
error => this.toastr.error(error.message, 'Oops!'));
- this.progressBarService.stopProgressBar()
+ this.progressBarService.stopProgressBar();
},
error => {
this.toastr.error(error.message, 'Oops!');
@@ -101,7 +109,7 @@
action, type, value: {
name: this.setupGroup,
users: this.setupUser ? this.setupUser.split(',').map(elem => elem.trim()) : [],
- roleIds: this.extractIds(this.roles, this.setupRoles)
+ roleIds: this.extractIds(this.roles, this.setupRoles.map(v => v.role))
}
});
this.stepperView = false;
@@ -121,12 +129,30 @@
}
});
} else if (action === 'update') {
- this.manageRolesGroups({
- action, type, value: {
- name: item.group,
- roleIds: this.extractIds(this.roles, item.selected_roles),
- users: item.users || []
+ const currGroupSource = this.startedGroups.filter(cur => cur.group === item.group)[0];
+ let deletedUsers = currGroupSource.users.filter(user => {
+ if (item.users.includes(user)) {
+ return false;
}
+ return true;
+ });
+ this.dialog.open(ConfirmationDialogComponent, { data:
+ { notebook: deletedUsers, type: ConfirmationDialogType.deleteUser }, panelClass: 'modal-sm' })
+ .afterClosed().subscribe((res) => {
+ if (!res) {
+ item.users = [...currGroupSource.users];
+ item.selected_roles = [...currGroupSource.selected_roles];
+ item.roles = [...currGroupSource.roles];
+ } else {
+ this.manageRolesGroups({
+ action, type, value: {
+ name: item.group,
+ roleIds: this.extractIds(this.roles, item.selected_roles.map(v => v.role)),
+ users: item.users || []
+ }
+ });
+ }
+ deletedUsers = [];
});
}
this.getEnvironmentHealthStatus();
@@ -136,17 +162,21 @@
public manageRolesGroups($event) {
switch ($event.action) {
case 'create':
+
this.rolesService.setupNewGroup($event.value).subscribe(res => {
this.toastr.success('Group creation success!', 'Created!');
this.getGroupsData();
}, () => this.toastr.error('Group creation failed!', 'Oops!'));
break;
+
case 'update':
this.rolesService.updateGroup($event.value).subscribe(res => {
- this.toastr.success('Group data successfully updated!', 'Success!');
- this.getGroupsData();
+ this.toastr.success(`Group data is updated successfully!`, 'Success!');
+ this.openManageRolesDialog();
}, () => this.toastr.error('Failed group data updating!', 'Oops!'));
+
break;
+
case 'delete':
if ($event.type === 'users') {
this.rolesService.removeUsersForGroup($event.value).subscribe(res => {
@@ -160,6 +190,7 @@
}, (error) => this.toastr.error(error.message, 'Oops!'));
}
break;
+
default:
}
}
@@ -172,10 +203,20 @@
}
public updateGroupData(groups) {
- this.groupsData = groups.map(v=>v).sort((a,b) => (a.group > b.group) ? 1 : ((b.group > a.group) ? -1 : 0));
+ this.groupsData = groups.map(v => {
+ if (!v.users) {
+ v.users = [];
+ }
+ return v;
+ }).sort((a, b) => (a.group > b.group) ? 1 : ((b.group > a.group) ? -1 : 0));
this.groupsData.forEach(item => {
- item.selected_roles = item.roles.map(role => role.description);
+ item.selected_roles = item.roles.map(role => ({role: role.description, type: role.type, cloud: role.cloud}));
});
+ this.getGroupsListCopy();
+ }
+
+ private getGroupsListCopy() {
+ this.startedGroups = JSON.parse(JSON.stringify(this.groupsData));
}
public groupValidarion(): ValidatorFn {
@@ -192,8 +233,19 @@
});
}
- public compareObjects(o1: any, o2: any): boolean {
- return o1.toLowerCase() === o2.toLowerCase();
+ private isGroupChanded(currGroup) {
+ const currGroupSource = this.startedGroups.filter(cur => cur.group === currGroup.group)[0];
+ if (currGroup.users.length !== currGroupSource.users.length &&
+ currGroup.selected_roles.length !== currGroupSource.selected_roles.length) {
+ return false;
+ }
+ return JSON.stringify(currGroup.users) === JSON.stringify(currGroupSource.users) &&
+ JSON.stringify(
+ currGroup.selected_roles.map(role => role.role).sort()
+ ) === JSON
+ .stringify(
+ currGroupSource.selected_roles.map(role => role.role).sort()
+ );
}
public resetDialog() {
@@ -219,6 +271,14 @@
this.healthStatusService.getEnvironmentHealthStatus()
.subscribe((result: any) => this.healthStatus = result);
}
+
+ public onUpdate($event): void {
+ if ($event.type) {
+ this.groupsData.filter(group => group.group === $event.type)[0].selected_roles = $event.model;
+ } else {
+ this.setupRoles = $event.model;
+ }
+ }
}
@@ -241,6 +301,7 @@
`,
styles: [`.group-name { max-width: 96%; display: inline-block; vertical-align: bottom; }`]
})
+
export class ConfirmDeleteUserAccountDialogComponent {
constructor(
public dialogRef: MatDialogRef<ConfirmDeleteUserAccountDialogComponent>,
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
index 555c341..e6583f6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/applicationServiceFacade.service.ts
@@ -47,6 +47,7 @@
private static readonly IMAGE = 'image';
private static readonly SCHEDULER = 'scheduler';
private static readonly TEMPLATES = 'templates';
+ private static readonly COMPUTATION_TEMPLATES = 'computation_templates';
private static readonly COMPUTATIONAL_RESOURCES_TEMLATES = 'computational_templates';
private static readonly COMPUTATIONAL_RESOURCES = 'computational_resources';
private static readonly COMPUTATIONAL_RESOURCES_DATAENGINE = 'computational_resources_dataengine';
@@ -182,6 +183,12 @@
null);
}
+ public buildGetComputationTemplatesRequest(params, provider): Observable<any> {
+ return this.buildRequest(HTTPMethod.GET,
+ '/api/' + provider + this.requestRegistry.Item(ApplicationServiceFacade.COMPUTATION_TEMPLATES) + params,
+ null);
+ }
+
public buildCreateExploratoryEnvironmentRequest(data): Observable<any> {
return this.buildRequest(HTTPMethod.PUT,
this.requestRegistry.Item(ApplicationServiceFacade.EXPLORATORY_ENVIRONMENT),
@@ -560,12 +567,6 @@
null);
}
- public buildDeleteProject(param): Observable<any> {
- return this.buildRequest(HTTPMethod.DELETE,
- this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + param,
- null);
- }
-
public buildToggleProjectStatus(param, data): Observable<any> {
return this.buildRequest(HTTPMethod.POST,
this.requestRegistry.Item(ApplicationServiceFacade.PROJECT) + param,
@@ -650,6 +651,8 @@
'/api/infrastructure_provision/exploratory_environment');
this.requestRegistry.Add(ApplicationServiceFacade.TEMPLATES,
'/api/infrastructure_templates');
+ this.requestRegistry.Add(ApplicationServiceFacade.COMPUTATION_TEMPLATES,
+ '/infrastructure_provision/computational_resources');
this.requestRegistry.Add(ApplicationServiceFacade.IMAGE,
'/api/infrastructure_provision/exploratory_environment/image');
this.requestRegistry.Add(ApplicationServiceFacade.SCHEDULER,
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
index fe61c75..5d35eec 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/dataengineConfiguration.service.ts
@@ -28,8 +28,8 @@
export class DataengineConfigurationService {
constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
- public getClusterConfiguration(exploratory, cluster, provider): Observable<{}> {
- const url = `/${exploratory}/${cluster}/config`;
+ public getClusterConfiguration(project, exploratory, cluster, provider): Observable<{}> {
+ const url = `/${project}/${exploratory}/${cluster}/config`;
return this.applicationServiceFacade
.buildGetClusterConfiguration(url, provider)
.pipe(
@@ -37,8 +37,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public editClusterConfiguration(data, exploratory, cluster, provider): Observable<{}> {
- const url = `/dataengine/${exploratory}/${cluster}/config`;
+ public editClusterConfiguration(data, project, exploratory, cluster, provider): Observable<{}> {
+ const url = `/dataengine/${project}/${exploratory}/${cluster}/config`;
return this.applicationServiceFacade
.buildEditClusterConfiguration(url, data, provider)
.pipe(
@@ -46,8 +46,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public getExploratorySparkConfiguration(exploratory): Observable<{}> {
- const url = `/${exploratory}/cluster/config`;
+ public getExploratorySparkConfiguration(project, exploratory): Observable<{}> {
+ const url = `/${project}/${exploratory}/cluster/config`;
return this.applicationServiceFacade
.buildGetExploratorySparkConfiguration(url)
.pipe(
@@ -55,8 +55,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public editExploratorySparkConfiguration(data, exploratory): Observable<{}> {
- const url = `/${exploratory}/reconfigure`;
+ public editExploratorySparkConfiguration(data, project, exploratory): Observable<{}> {
+ const url = `/${project}/${exploratory}/reconfigure`;
return this.applicationServiceFacade
.buildEditExploratorySparkConfiguration(url, data)
.pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
index d4f8942..2119b1a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/librariesInstallation.service.ts
@@ -28,8 +28,8 @@
export class LibrariesInstallationService {
constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
- public getGroupsList(exploratory, computational?): Observable<Response> {
- let body = `?exploratory_name=${exploratory}`;
+ public getGroupsList(project, exploratory, computational?): Observable<Response> {
+ let body = `?project_name=${project}&exploratory_name=${exploratory}`;
if (computational) body += `&computational_name=${computational}`;
return this.applicationServiceFacade
@@ -65,8 +65,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public getInstalledLibrariesList(exploratory): Observable<{}> {
- const body = `?exploratory_name=${exploratory}`;
+ public getInstalledLibrariesList(project, exploratory): Observable<{}> {
+ const body = `?project_name=${project}&exploratory_name=${exploratory}`;
return this.applicationServiceFacade
.buildGetInstalledLibrariesList(body)
@@ -75,8 +75,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public getInstalledLibsByResource(exploratory, computational?): Observable<{}> {
- let body = `?exploratory_name=${exploratory}`;
+ public getInstalledLibsByResource(project, exploratory, computational?): Observable<{}> {
+ let body = `?project_name=${project}&exploratory_name=${exploratory}`;
if (computational) body += `&computational_name=${computational}`;
return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
index 4b759b6..2d21fd5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/managementEnvironments.service.ts
@@ -36,8 +36,8 @@
catchError(ErrorUtils.handleServiceError));
}
- environmentManagement(data, action: string, resource: string, computational?: string): Observable<{}> {
- const params = computational ? `/${action}/${resource}/${computational}` : `/${action}/${resource}`;
+ environmentManagement(data, action: string, project: string, resource: string, computational?: string): Observable<{}> {
+ const params = computational ? `/${action}/${project}/${resource}/${computational}` : `/${action}/${project}/${resource}`;
return this.applicationServiceFacade
.buildEnvironmentManagement(params, data)
.pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
index 805c8e8..0bb54b5 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/project.service.ts
@@ -70,15 +70,6 @@
catchError(ErrorUtils.handleServiceError));
}
- public deleteProject(data): Observable<{}> {
- const url = `/${data}`;
- return this.applicationServiceFacade
- .buildDeleteProject(url)
- .pipe(
- map(response => response),
- catchError(ErrorUtils.handleServiceError));
- }
-
public toggleProjectStatus(data, action): Observable<{}> {
const url = `/${action}`;
return this.applicationServiceFacade
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
index c595486..a854305 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/scheduler.service.ts
@@ -29,8 +29,8 @@
export class SchedulerService {
constructor(private applicationServiceFacade: ApplicationServiceFacade) {}
- public getExploratorySchedule(notebook, resource?): Observable<{}> {
- const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
+ public getExploratorySchedule(project, notebook, resource?): Observable<{}> {
+ const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
return this.applicationServiceFacade
.buildGetExploratorySchedule(param)
.pipe(
@@ -38,8 +38,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public setExploratorySchedule(notebook, data, resource?): Observable<ScheduleSchema> {
- const param = resource ? `/${notebook}/${resource}` : `/${notebook}`;
+ public setExploratorySchedule(project, notebook, data, resource?): Observable<ScheduleSchema> {
+ const param = resource ? `/${project}/${notebook}/${resource}` : `/${project}/${notebook}`;
return this.applicationServiceFacade
.buildSetExploratorySchedule(param, data)
.pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
index a41bd29..6f7c254 100644
--- a/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/core/services/userResource.service.ts
@@ -37,10 +37,10 @@
catchError(ErrorUtils.handleServiceError));
}
- public getComputationalTemplates(project, endpoint): Observable<any> {
- const url = `/${project}/${endpoint}/computational_templates`;
+ public getComputationalTemplates(project, endpoint, provider): Observable<any> {
+ const url = `/${project}/${endpoint}/templates`;
return this.applicationServiceFacade
- .buildGetTemplatesRequest(url)
+ .buildGetComputationTemplatesRequest(url, provider)
.pipe(
map(response => response),
catchError(ErrorUtils.handleServiceError));
@@ -81,7 +81,7 @@
}
public suspendExploratoryEnvironment(notebook: any, action): Observable<{}> {
- const url = '/' + notebook.name + '/' + action;
+ const url = '/' + notebook.project + '/' + notebook.name + '/' + action;
return this.applicationServiceFacade
.buildSuspendExploratoryEnvironmentRequest(JSON.stringify(url))
@@ -108,8 +108,8 @@
catchError(ErrorUtils.handleServiceError));
}
- public suspendComputationalResource(notebookName: string, computationalResourceName: string, provider: string): Observable<{}> {
- const body = JSON.stringify('/' + notebookName + '/' + computationalResourceName + '/terminate');
+ public suspendComputationalResource(projectName: string, notebookName: string, computationalResourceName: string, provider: string): Observable<{}> {
+ const body = JSON.stringify('/' + projectName + '/' + notebookName + '/' + computationalResourceName + '/terminate');
return this.applicationServiceFacade
.buildDeleteComputationalResourcesRequest(body, provider)
.pipe(
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
index 5de5725..932bd3e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.html
@@ -31,7 +31,7 @@
</button>
</th>
<td mat-cell *matCellDef="let element"> {{element[DICTIONARY[PROVIDER].billing.dlabId]}} </td>
- <td mat-footer-cell *matFooterCellDef class="table-footer"> Total </td>
+ <td mat-footer-cell *matFooterCellDef class="table-footer"></td>
</ng-container>
<ng-container matColumnDef="user">
@@ -106,7 +106,7 @@
</button>
</th>
<td mat-cell *matCellDef="let element">
- <span [outerHTML]="element.size | lineBreak"></span>
+ <span>{{element[DICTIONARY[PROVIDER].billing.instance_size]}}</span>
</td>
<td mat-footer-cell *matFooterCellDef class="table-footer"></td>
</ng-container>
@@ -123,7 +123,6 @@
</button>
</th>
<td mat-cell *matCellDef="let element">
- {{ element['service'] }}
<span *ngIf="element[DICTIONARY[PROVIDER].billing.service]">{{ element[DICTIONARY[PROVIDER].billing.service] }}</span>
</td>
<td mat-footer-cell *matFooterCellDef class="table-footer"></td>
@@ -138,7 +137,7 @@
{{ element[DICTIONARY[PROVIDER].billing.cost] }} {{ element[DICTIONARY[PROVIDER].billing.currencyCode] }}
</td>
<td mat-footer-cell *matFooterCellDef class="table-footer">
- <span *ngIf="reportData?.length">{{ fullReport['cost_total'] }}
+ Total <span *ngIf="reportData?.length"> {{ fullReport['cost_total'] }}
{{ fullReport[DICTIONARY[PROVIDER].billing.currencyCode] }}</span>
</td>
</ng-container>
@@ -178,8 +177,8 @@
<ng-container matColumnDef="shape-filter">
<th mat-header-cell *matHeaderCellDef>
<multi-select-dropdown *ngIf="filterConfiguration" (selectionChange)="onUpdate($event)"
- [type]="[DICTIONARY[PROVIDER].billing.instance_size]" [items]="filterConfiguration[DICTIONARY[PROVIDER].billing.instance_size]"
- [model]="filteredReportData[DICTIONARY[PROVIDER].billing.instance_size]"></multi-select-dropdown>
+ [type]="'shape'"[items]="filterConfiguration['shape']"
+ [model]="filteredReportData['shape']"></multi-select-dropdown>
</th>
</ng-container>
<ng-container matColumnDef="service-filter">
@@ -209,7 +208,6 @@
</td>
</ng-container>
-
<tr mat-header-row *matHeaderRowDef="displayedColumns; sticky: true" class="header-row"></tr>
<tr [hidden]="!collapseFilterRow || !PROVIDER" mat-header-row *matHeaderRowDef="displayedFilterColumns; sticky: true"
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
index 3b19ae7..f8f872f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting-grid/reporting-grid.component.scss
@@ -113,7 +113,7 @@
.th_charges {
width: 8%;
- min-width: 110px;
+ min-width: 130px;
padding-right: 15px;
text-align: right;
@@ -148,9 +148,15 @@
.header-row {
position: unset;
+ .th_charges {
+ padding-top: 0;
+ .label {
+ padding-top: 12px;
+ }
+ }
.label {
display: inline-block;
- padding-top: 15px;
+ padding-top: 13px;
vertical-align: super !important;
padding-left: 15px;
}
@@ -161,6 +167,16 @@
text-align: right;
}
}
+
+ .table-footer{
+ position: sticky;
+ bottom: 0;
+ background: inherit;
+ border-top: 1px solid #E0E0E0;
+ transform: translateY(-1px);
+ border-bottom: none;
+ padding-left: 0 !important;
+ }
}
.dashboard_table_body {
@@ -177,14 +193,7 @@
}
}
-.table-footer{
- position: sticky;
- bottom: 0;
- background: inherit;
- border-top: 1px solid #E0E0E0;
- transform: translateY(-1px);
- border-bottom: none;
-}
+
@media screen and (max-width: 1280px) {
.dashboard_table.reporting {
diff --git a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
index 71a5c61..958eb23 100644
--- a/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/reporting/reporting.component.ts
@@ -79,7 +79,7 @@
) { }
ngOnInit() {
- this.getBillingProvider();
+ this.getEnvironmentHealthStatus();
}
ngOnDestroy() {
@@ -87,11 +87,9 @@
}
getBillingProvider() {
- this.getEnvironmentHealthStatus();
if (this.admin) {
this.endpointService.getEndpointsData().subscribe(list => {
- // @ts-ignore
- const endpoints = [...list];
+ const endpoints = JSON.parse(JSON.stringify(list));
const localEndpoint = endpoints.filter(endpoint => endpoint.name === 'local');
if (localEndpoint.length) {
this.PROVIDER = localEndpoint[0].cloudProvider.toLowerCase();
@@ -232,6 +230,7 @@
.subscribe((result: any) => {
this.billingEnabled = result.billingEnabled;
this.admin = result.admin;
+ this.getBillingProvider();
});
}
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
index 75ea01e..11002e1 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/cluster-details/cluster-details.component.ts
@@ -91,14 +91,14 @@
public getClusterConfiguration(): void {
this.dataengineConfigurationService
- .getClusterConfiguration(this.environment.name, this.resource.computational_name, this.PROVIDER)
+ .getClusterConfiguration(this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER)
.subscribe((result: any) => this.config = result,
error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
}
public editClusterConfiguration(data): void {
this.dataengineConfigurationService
- .editClusterConfiguration(data.configuration_parameters, this.environment.name, this.resource.computational_name, this.PROVIDER)
+ .editClusterConfiguration(data.configuration_parameters, this.environment.project, this.environment.name, this.resource.computational_name, this.PROVIDER)
.subscribe(result => {
this.dialogRef.close();
},
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
index aab4e24..ec3c3ac 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.html
@@ -69,15 +69,16 @@
<label class="label">Cluster alias</label>
<div class="control">
<input
- [class.danger_field]="computationalResourceExist || !resourceForm?.controls['cluster_alias_name'].valid
+ [class.danger_field]="!resourceForm?.controls['cluster_alias_name'].valid
&& resourceForm?.controls['cluster_alias_name'].dirty && resourceForm?.controls['cluster_alias_name'].hasError('duplication')"
type="text" class="form-control" placeholder="Enter cluster alias"
formControlName="cluster_alias_name" />
- <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('duplication')">This
- cluster name already exists.</span>
+ <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')">You have cluster with this name in current project.</span>
+ <span class="error" *ngIf="resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">Other user has cluster with this name in current project.</span>
<span class="error" *ngIf="!resourceForm?.controls.cluster_alias_name.valid
&& resourceForm?.controls['cluster_alias_name'].dirty
- && !resourceForm?.controls['cluster_alias_name'].hasError('duplication')">
+ && !resourceForm?.controls['cluster_alias_name'].hasError('user-duplication')
+ && !resourceForm?.controls['cluster_alias_name'].hasError('other-user-duplication')">
Cluster name cannot be longer than {{DICTIONARY[PROVIDER].max_cluster_name_length}} characters
and can only contain letters, numbers, hyphens and '_' but can not end with special
characters
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
index 29ef7d4..0c867f9 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resource-create-dialog/computational-resource-create-dialog.component.ts
@@ -44,6 +44,8 @@
notebook_instance: any;
resourcesList: any;
clusterTypes = [];
+ userComputations = [];
+ projectComputations = [];
selectedImage: any;
spotInstance: boolean = true;
@@ -76,7 +78,7 @@
this.notebook_instance = this.data.notebook;
this.resourcesList = this.data.full_list;
this.initFormModel();
- this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint);
+ this.getTemplates(this.notebook_instance.project, this.notebook_instance.endpoint, this.notebook_instance.cloud_provider);
}
public selectImage($event) {
@@ -229,16 +231,24 @@
}
private checkDuplication(control) {
- if (this.containsComputationalResource(control.value))
- return { duplication: true };
+ if (this.containsComputationalResource(control.value, this.userComputations)){
+ return { 'user-duplication': true };
+ }
+
+ if (this.containsComputationalResource(control.value, this.projectComputations)){
+ return { 'other-user-duplication': true };
+ }
}
- private getTemplates(project, endpoint) {
- this.userResourceService.getComputationalTemplates(project, endpoint).subscribe(
+ private getTemplates(project, endpoint, provider) {
+ this.userResourceService.getComputationalTemplates(project, endpoint, provider).subscribe(
clusterTypes => {
- this.clusterTypes = clusterTypes;
+ this.clusterTypes = clusterTypes.templates;
+ this.userComputations = clusterTypes.user_computations;
+ this.projectComputations = clusterTypes.project_computations;
+
this.clusterTypes.forEach((cluster, index) => this.clusterTypes[index].computation_resources_shapes = SortUtils.shapesSort(cluster.computation_resources_shapes));
- this.selectedImage = clusterTypes[0];
+ this.selectedImage = clusterTypes.templates[0];
if (this.selectedImage) {
this._ref.detectChanges();
@@ -284,10 +294,10 @@
return filtered;
}
- private containsComputationalResource(conputational_resource_name: string): boolean {
+ private containsComputationalResource(conputational_resource_name: string, existNames: Array<string>): boolean {
if (conputational_resource_name) {
- return this.notebook_instance.resources.some(resource =>
- CheckUtils.delimitersFiltering(conputational_resource_name) === CheckUtils.delimitersFiltering(resource.computational_name));
+ return existNames.some(resource =>
+ CheckUtils.delimitersFiltering(conputational_resource_name) === CheckUtils.delimitersFiltering(resource));
}
}
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
index e55ab94..c47cc43 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.scss
@@ -110,7 +110,8 @@
}
}
@media screen and (max-width: 1520px) {
- resources-grid {
+ .resources,
+ managment {
.source {
.resource-wrap {
.resource-name {
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
index f7bfa1d..7bc5126 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/computational/computational-resources-list/computational-resources-list.component.ts
@@ -60,7 +60,7 @@
});
} else if (result && action === 'terminate') {
this.userResourceService
- .suspendComputationalResource(this.environment.name, resource.computational_name, this.environment.cloud_provider)
+ .suspendComputationalResource(this.environment.project, this.environment.name, resource.computational_name, this.environment.cloud_provider)
.subscribe(() => {
this.rebuildGrid();
});
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
index 1e7c25c..4a7a4a6 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.html
@@ -33,9 +33,7 @@
image name already exists in project.</span>
<span class="error"
*ngIf="!createAMIForm.valid && createAMIForm.controls['name'].dirty && !createAMIForm.controls['name'].hasError('duplication')">
- image name
- <span *ngIf="provider === 'azure'"> cannot be longer than 10 characters and</span>
- can only contain letters, numbers, hyphens and '_'</span>
+ Name cannot be longer than 10 characters and can only contain letters, numbers, hyphens and '_' but can not end with special characters</span>
</div>
</div>
<div class="control-group">
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
index ff17382..e783951 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/ami-create-dialog/ami-create-dialog.component.ts
@@ -23,7 +23,7 @@
import { ToastrService } from 'ngx-toastr';
import { UserResourceService } from '../../../core/services';
-import { HTTP_STATUS_CODES } from '../../../core/util';
+import {HTTP_STATUS_CODES, PATTERNS} from '../../../core/util';
import { DICTIONARY } from '../../../../dictionary/global.dictionary';
@Component({
@@ -36,7 +36,6 @@
public notebook: any;
public createAMIForm: FormGroup;
public provider: string;
- namePattern = '[-_a-zA-Z0-9]+';
delimitersRegex = /[-_]?/g;
imagesList: any;
@@ -64,9 +63,10 @@
private initFormModel(): void {
this.createAMIForm = this._fb.group({
- name: ['', [Validators.required, Validators.pattern(this.namePattern), this.providerMaxLength, this.checkDuplication.bind(this)]],
+ name: ['', [Validators.required, Validators.pattern(PATTERNS.namePattern), this.providerMaxLength, this.checkDuplication.bind(this)]],
description: [''],
- exploratory_name: [this.notebook.name]
+ exploratory_name: [this.notebook.name],
+ project_name: [this.notebook.project]
});
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
index c827bd7..c2f607a 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/detail-dialog/detail-dialog.component.ts
@@ -83,7 +83,7 @@
public getClusterConfiguration(): void {
this.dataengineConfigurationService
- .getExploratorySparkConfiguration(this.notebook.name)
+ .getExploratorySparkConfiguration(this.notebook.project, this.notebook.name)
.subscribe(
(result: any) => this.config = result,
error => this.toastr.error(error.message || 'Configuration loading failed!', 'Oops!'));
@@ -102,7 +102,7 @@
public editClusterConfiguration(data): void {
this.dataengineConfigurationService
- .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.name)
+ .editExploratorySparkConfiguration(data.configuration_parameters, this.notebook.project, this.notebook.name)
.subscribe(result => {
this.dialogRef.close();
},
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
index b0c2092..968f1a3 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.html
@@ -28,7 +28,7 @@
<p class=" message">Cannot install libraries: Exploratory
<strong>{{ notebook?.name }}</strong> is not running</p>
</div>
- <div class="loading-block" *ngIf="!libs_uploaded && uploading && notebook?.status === 'running'">
+ <div class="loading-block" *ngIf="!libs_uploaded && uploading && data.status === 'running'">
<div class="uploading">
<p>Please wait until DLab loads full list of available libraries for you...</p>
<img src="assets/img/gif-spinner.gif" alt="loading">
@@ -48,12 +48,20 @@
<label class="label">Select group</label>
<div class="control">
<dropdown-list #groupSelect (selectedItem)="onUpdate($event)"></dropdown-list>
+ <span class="error-message" *ngIf="!group && libSearch.value">Group field is required. Please choose appropriate group.</span>
</div>
</div>
</div>
<div class="search">
<mat-form-field class="chip-list">
- <input type="text" [placeholder]="group === 'java' ? 'Enter library name in <groupId>:<artifactId>:<versionId> format' : 'Enter library name'" matInput [formControl]="libSearch" [value]="query" [matAutocomplete]="auto">
+ <input
+ type="text"
+ [placeholder]="group === 'java' ? 'Enter library name in <groupId>:<artifactId>:<versionId> format' : 'Enter library name'"
+ matInput
+ [formControl]="libSearch"
+ [value]="query"
+ [matAutocomplete]="auto"
+ >
<mat-icon matSuffix>search</mat-icon>
<mat-autocomplete #auto="matAutocomplete" class="suggestions">
<ng-template ngFor let-item [ngForOf]="filteredList" let-i="index">
@@ -84,7 +92,7 @@
</mat-option>
</mat-autocomplete>
</mat-form-field>
- <div class="list-selected list-container" id="scrolling">
+ <div class="list-selected list-container" id='scrolling'>
<mat-chip-list *ngIf="model.selectedLibs.length && libs_uploaded">
<mat-chip *ngFor="let item of model.selectedLibs">
{{ item.name }}
@@ -140,7 +148,7 @@
<ng-container *ngIf="filtered" >
<mat-list-item class="lib-col filter-row">
<th class="lib-name lib-input">
- <input placeholder="Filter by library name" [value]="filterModel.name" (input)="filterModel.name = $event.target.value" type="text" class="form-control filter-field "/>
+ <input placeholder="Filter by library name" [value]="filterModel.name" (input)="filterModel.name = $event.target['value']" type="text" class="form-control filter-field "/>
</th>
<th class="lib-group lib-col">
<multi-select-dropdown
@@ -185,7 +193,7 @@
</button>
<button mat-icon-button class="btn apply" (click)="filterLibs()">
- <i class="material-icons" [ngClass]="{'not-allowed': filterModel.length == 0}">done</i>
+ <i class="material-icons" [ngClass]="{'not-allowed': filterModel.length === 0}">done</i>
</button>
</div>
</th>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
index 2f8e28d..a6b7070 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.scss
@@ -444,6 +444,14 @@
}
}
+.error-message{
+ position: absolute;
+ left: 20%;
+ top: 40px;
+ font-size: 11px;
+ color: red;
+}
+
@media screen and (min-width: 1281px) {
.libs-info {
height: 60%;
@@ -456,3 +464,16 @@
}
}
}
+
+@media screen and (max-height: 800px) {
+ .libs-info {
+ height: 50%;
+
+ .mat-list {
+ .scrollingList {
+ max-height: 140px;
+ height: 60%;
+ }
+ }
+ }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
index a4bfe90..2f2d733 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.component.ts
@@ -89,14 +89,15 @@
}
ngOnInit() {
- this.libSearch.disable();
+ this.open(this.data);
+ this.uploadLibGroups();
this.libSearch.valueChanges.pipe(
debounceTime(1000))
.subscribe(newValue => {
this.query = newValue || '';
this.filterList();
});
- this.open(this.data);
+ this.getInstalledLibsByResource();
}
ngOnDestroy() {
@@ -105,7 +106,9 @@
}
uploadLibGroups(): void {
- this.librariesInstallationService.getGroupsList(this.notebook.name, this.model.computational_name)
+ this.libs_uploaded = false;
+ this.uploading = true;
+ this.librariesInstallationService.getGroupsList(this.notebook.project, this.notebook.name, this.model.computational_name)
.subscribe(
response => {
this.libsUploadingStatus(response);
@@ -153,13 +156,11 @@
this.group = $event.model.value;
} else if ($event.model.type === 'destination') {
this.resetDialog();
-
this.destination = $event.model.value;
this.destination && this.destination.type === 'СOMPUTATIONAL'
? this.model.computational_name = this.destination.name
: this.model.computational_name = null;
- this.libSearch.enable();
this.uploadLibGroups();
this.getInstalledLibsByResource();
}
@@ -189,7 +190,6 @@
this.model.selectedLibs.push({ group: this.group, name: item.name, version: item.version });
this.query = '';
this.libSearch.setValue('');
-
this.filteredList = null;
}
@@ -199,6 +199,7 @@
public open(notebook): void {
this.notebook = notebook;
+ this.destination = this.getResourcesList()[0];
this.model = new InstallLibrariesModel(notebook,
response => {
if (response.status === HTTP_STATUS_CODES.OK) {
@@ -213,7 +214,7 @@
this.selectorsReset();
},
this.librariesInstallationService);
- }
+ }
public showErrorMessage(item): void {
const dialogRef: MatDialogRef<ErrorMessageDialogComponent> = this.dialog.open(
@@ -221,8 +222,8 @@
}
public isInstallingInProgress(): void {
- const isInstallingNow = this.notebookLibs.some(lib => lib.filteredStatus.some(status => status.status === 'installing'));
- if (isInstallingNow) {
+ this.installingInProgress = this.notebookLibs.some(lib => lib.filteredStatus.some(status => status.status === 'installing'));
+ if (this.installingInProgress) {
clearTimeout(this.loadLibsTimer);
this.loadLibsTimer = window.setTimeout(() => this.getInstalledLibrariesList(), 10000);
}
@@ -269,7 +270,7 @@
}
private getInstalledLibsByResource() {
- this.librariesInstallationService.getInstalledLibsByResource(this.notebook.name, this.model.computational_name)
+ this.librariesInstallationService.getInstalledLibsByResource(this.notebook.project, this.notebook.name, this.model.computational_name)
.subscribe((data: any) => this.destination.libs = data);
}
@@ -307,9 +308,9 @@
}
private selectorsReset(): void {
- this.resource_select && this.resource_select.setDefaultOptions(this.getResourcesList(),
- 'Select resource', 'destination', 'title', 'array');
- this.group_select && this.group_select.setDefaultOptions([], '', 'group_lib', null, 'array');
+ this.destination = this.getResourcesList()[0];
+ this.uploadLibGroups();
+ this.getInstalledLibsByResource();
}
private resetDialog(): void {
@@ -321,10 +322,8 @@
this.uploading = false;
this.model.selectedLibs = [];
this.filteredList = null;
- this.destination = null;
this.groupsList = [];
- this.libSearch.disable();
clearTimeout(this.clear);
clearTimeout(this.loadLibsTimer);
this.selectorsReset();
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
index 6d0369e..b201904 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/exploratory/install-libraries/install-libraries.model.ts
@@ -56,7 +56,8 @@
}
public getLibrariesList(group: string, query: string): Observable<{}> {
- let lib_query: any = {
+ const lib_query: any = {
+ project_name: this.notebook.project,
exploratory_name: this.notebook.name,
group: group,
start_with: query
@@ -75,12 +76,13 @@
public getInstalledLibrariesList(notebook): Observable<{}> {
return this.librariesInstallationService.getInstalledLibrariesList(
- notebook.name
+ notebook.project, notebook.name
);
}
private installLibraries(retry?: Library, item?): Observable<{}> {
- let lib_list: any = {
+ const lib_list: any = {
+ project_name: this.notebook.project,
exploratory_name: this.notebook.name,
libs: retry ? retry : this.selectedLibs
};
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
index b8c061b..ea0c65f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.html
@@ -93,7 +93,9 @@
</th>
</ng-container>
<ng-container matColumnDef="actions" stickyEnd>
- <th mat-header-cell *matHeaderCellDef class="actions-col"></th>
+ <th mat-header-cell *matHeaderCellDef class="actions-col">
+ <span class="label"> Actions </span>
+ </th>
</ng-container>
<!-- ----------------------------------------------------- -->
@@ -168,10 +170,10 @@
</div>
</li>
<li *ngIf="element.status.toLowerCase() === 'stopped' || element.status.toLowerCase() === 'stopping'"
- matTooltip="Unable to run notebook until it will be stopped" matTooltipPosition="above"
- [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping'">
+ matTooltip="{{isEdgeNodeStopped(element) ? 'Unable to run notebook if edge node is stopped.' : 'Unable to run notebook until it will be stopped.'}}" matTooltipPosition="above"
+ [matTooltipDisabled]="!isResourcesInProgress(element) && element.status.toLowerCase() !== 'stopping' && !isEdgeNodeStopped(element)">
<div (click)="exploratoryAction(element, 'run')"
- [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' }">
+ [ngClass]="{'not-allowed': isResourcesInProgress(element) || element.status.toLowerCase() === 'stopping' || isEdgeNodeStopped(element) }">
<i class="material-icons">play_circle_outline</i>
<span>Run</span>
</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
index 7ba14e5..1657a56 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.scss
@@ -95,11 +95,17 @@
padding-right: 5px;
padding-left: 24px;
background-color: inherit;
+ .label{
+ padding-top: 14px;
+ }
}
.status-col,
.shape-col {
width: 14%;
+ .label{
+ padding-top: 14px;
+ }
}
.tag-col {
@@ -120,6 +126,9 @@
.resources-col {
width: 28%;
+ .label{
+ padding-top: 14px;
+ }
}
.cost-col {
@@ -132,6 +141,9 @@
padding-right: 24px;
text-align: right;
background-color: inherit;
+ .label{
+ padding-right: 5px;
+ }
}
}
@@ -227,6 +239,7 @@
.filter-row .actions {
text-align: right;
display: flex;
+ justify-content: flex-end;
}
.filter-row .actions button {
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
index ce14e87..861981e 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources-grid/resources-grid.component.ts
@@ -18,7 +18,7 @@
*/
/* tslint:disable:no-empty */
-import { Component, OnInit } from '@angular/core';
+import {Component, Input, OnInit} from '@angular/core';
import { animate, state, style, transition, trigger } from '@angular/animations';
import { ToastrService } from 'ngx-toastr';
import { MatDialog } from '@angular/material/dialog';
@@ -63,6 +63,8 @@
export class ResourcesGridComponent implements OnInit {
readonly DICTIONARY = DICTIONARY;
+ @Input() projects: Array<any>;
+
environments: Exploratory[];
collapseFilterRow: boolean = false;
@@ -142,7 +144,7 @@
public isResourcesInProgress(notebook) {
- const env = this.getResourceByName(notebook.name);
+ const env = this.getResourceByName(notebook.name, notebook.project);
if (env && env.resources && env.resources.length) {
return env.resources.filter(item => (item.status !== 'failed' && item.status !== 'terminated'
@@ -151,6 +153,12 @@
return false;
}
+ public isEdgeNodeStopped(resource) {
+ const currProject = this.projects.filter(proj => proj.name === resource.project);
+ const currEdgenodeStatus = currProject[0].endpoints.filter(node => node.name === resource.endpoint)[0].status;
+ return currEdgenodeStatus === 'STOPPED' || currEdgenodeStatus === 'STOPPING';
+ }
+
public filterActiveInstances(): FilterConfigurationModel {
return (<FilterConfigurationModel | any>Object).assign({}, this.filterConfiguration, {
statuses: SortUtils.activeStatuses(),
@@ -182,7 +190,7 @@
}
public exploratoryAction(data, action: string) {
- const resource = this.getResourceByName(data.name);
+ const resource = this.getResourceByName(data.name, data.project);
if (action === 'deploy') {
this.dialog.open(ComputationalResourceCreateDialogComponent, { data: { notebook: resource, full_list: this.environments }, panelClass: 'modal-xxl' })
@@ -215,8 +223,8 @@
// PRIVATE
- private getResourceByName(notebook_name: string) {
- return this.getEnvironmentsListCopy()
+ private getResourceByName(notebook_name: string, project_name: string) {
+ return this.getEnvironmentsListCopy().filter(environments => environments.project === project_name)
.map(env => env.exploratory.find(({ name }) => name === notebook_name))
.filter(notebook_name => !!notebook_name)[0];
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
index b705c38..091ccb7 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/resources.component.html
@@ -63,5 +63,5 @@
</div>
</div>
<mat-divider></mat-divider>
- <resources-grid></resources-grid>
+ <resources-grid [projects] = "projects"></resources-grid>
</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
index 20b8ce5..0df2a59 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.component.ts
@@ -88,7 +88,7 @@
public open(notebook, type, resource?): void {
this.notebook = notebook;
-
+ console.log(this.notebook)
this.zones = _moment.tz.names()
.map(item => [_moment.tz(item).format('Z'), item])
.sort()
@@ -116,11 +116,11 @@
if (this.destination.type === 'СOMPUTATIONAL') {
this.allowInheritView = true;
- this.getExploratorySchedule(this.notebook.name, this.destination.computational_name);
+ this.getExploratorySchedule(this.notebook.project, this.notebook.name, this.destination.computational_name);
this.checkParentInherit();
} else if (this.destination.type === 'EXPLORATORY') {
this.allowInheritView = this.checkIsActiveSpark();
- this.getExploratorySchedule(this.notebook.name);
+ this.getExploratorySchedule(this.notebook.project, this.notebook.name);
}
},
this.schedulerService
@@ -139,7 +139,7 @@
this.inherit = $event.checked;
if (this.destination.type === 'СOMPUTATIONAL' && this.inherit) {
- this.getExploratorySchedule(this.notebook.name);
+ this.getExploratorySchedule(this.notebook.project, this.notebook.name);
this.schedulerForm.get('startDate').disable();
} else {
this.schedulerForm.get('startDate').enable();
@@ -248,18 +248,19 @@
};
if (this.destination.type === 'СOMPUTATIONAL') {
- this.model.confirmAction(this.notebook.name, parameters, this.destination.computational_name);
+ this.model.confirmAction(this.notebook.project, this.notebook.name, parameters, this.destination.computational_name);
} else {
parameters['consider_inactivity'] = this.considerInactivity;
- this.model.confirmAction(this.notebook.name, parameters);
+ this.model.confirmAction(this.notebook.project, this.notebook.name, parameters);
}
}
private setScheduleByInactivity() {
+ console.log(this.notebook)
const data = { sync_start_required: this.parentInherit, check_inactivity_required: this.enableIdleTime, max_inactivity: this.schedulerForm.controls.inactivityTime.value };
(this.destination.type === 'СOMPUTATIONAL')
- ? this.setInactivity(this.notebook.name, data, this.destination.computational_name)
- : this.setInactivity(this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
+ ? this.setInactivity(this.notebook.project, this.notebook.name, data, this.destination.computational_name)
+ : this.setInactivity(this.notebook.project, this.notebook.name, { ...data, consider_inactivity: this.considerInactivity });
}
private formInit(start?: string, end?: string, terminate?: string) {
@@ -272,8 +273,8 @@
});
}
- private getExploratorySchedule(resource, resource2?) {
- this.schedulerService.getExploratorySchedule(resource, resource2).subscribe(
+ private getExploratorySchedule(project, resource, resource2?) {
+ this.schedulerService.getExploratorySchedule(project, resource, resource2).subscribe(
(params: ScheduleSchema) => {
if (params) {
params.start_days_repeat.filter(key => (this.selectedStartWeekDays[key.toLowerCase()] = true));
@@ -302,7 +303,7 @@
}
private checkParentInherit() {
- this.schedulerService.getExploratorySchedule(this.notebook.name)
+ this.schedulerService.getExploratorySchedule(this.notebook.project, this.notebook.name)
.subscribe((res: any) => this.parentInherit = res.sync_start_required);
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
index f83be29..c0093ee 100644
--- a/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/resources/scheduler/scheduler.model.ts
@@ -61,13 +61,13 @@
if (this.continueWith) this.continueWith();
}
- private scheduleInstance(notebook, params, resourse) {
- return this.schedulerService.setExploratorySchedule(notebook, params, resourse);
+ private scheduleInstance(project, notebook, params, resourse) {
+ return this.schedulerService.setExploratorySchedule(project, notebook, params, resourse);
}
public setInactivityTime(params) {
- const [notebook, data, resource] = params;
- return this.scheduleInstance(notebook, data, resource);
+ const [project, notebook, data, resource] = params;
+ return this.scheduleInstance(project, notebook, data, resource);
}
public resetSchedule(notebook, resourse) {
@@ -75,8 +75,8 @@
}
private prepareModel(fnProcessResults: any, fnProcessErrors: any): void {
- this.confirmAction = (notebook, data, resourse?) =>
- this.scheduleInstance(notebook, data, resourse).subscribe(
+ this.confirmAction = (project, notebook, data, resourse?) =>
+ this.scheduleInstance(project, notebook, data, resourse).subscribe(
response => fnProcessResults(response),
error => fnProcessErrors(error)
);
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
index bac0dd6..4ea5a14 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/index.ts
@@ -25,6 +25,7 @@
import { DirectivesModule } from '../../core/directives';
import { KeysPipeModule, UnderscorelessPipeModule } from '../../core/pipes';
import { BubbleModule } from '..';
+import {MultiLevelSelectDropdownComponent} from './multi-level-select-dropdown/multi-level-select-dropdown.component';
export * from './multi-select-dropdown/multi-select-dropdown.component';
export * from './dropdown-list/dropdown-list.component';
@@ -37,7 +38,7 @@
UnderscorelessPipeModule,
BubbleModule
],
- declarations: [DropdownListComponent, MultiSelectDropdownComponent],
- exports: [DropdownListComponent, MultiSelectDropdownComponent]
+ declarations: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent],
+ exports: [DropdownListComponent, MultiSelectDropdownComponent, MultiLevelSelectDropdownComponent]
})
export class FormControlsModule {}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
new file mode 100644
index 0000000..4e41606
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.html
@@ -0,0 +1,94 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+ -->
+
+<div class="dropdown-multiselect btn-group" ngClass="{{type || ''}}">
+ <button type="button" #list (click)="multiactions.toggle($event, list)">
+ <span class="ellipsis" *ngIf="model.length === 0">Select roles</span>
+ <span class="selected-items ellipsis" *ngIf="model.length !== 0">
+ {{selectedRolesList()}}
+ </span>
+ <span class="caret-btn"><i class="material-icons">keyboard_arrow_down</i></span>
+ </button>
+
+ <bubble-up #multiactions position="bottom" [keep-open]="true" class="mt-5">
+ <ul class="list-menu" id="scrolling">
+ <li class="filter-actions">
+ <a class="select_all" (click)="selectAllOptions($event)">
+ <i class="material-icons">playlist_add_check</i> All
+ </a>
+ <a class="deselect_all" (click)="deselectAllOptions($event)">
+ <i class="material-icons">clear</i> None
+ </a>
+ </li>
+
+ <ng-template ngFor let-item [ngForOf]="items" let-i="index">
+ <li class="role-label" role="presentation" *ngIf="i === 0 || model && item.type !== items[i - 1].type" (click)="toggleItemsForLable(item.type, $event)">
+ <a href="#" class="list-item" role="menuitem">
+ <span class="arrow" [ngClass]="{'rotate-arrow': isOpenCategory[item.type], 'arrow-checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}"></span>
+ <span class="empty-checkbox" [ngClass]="{'checked': selectedAllInCattegory(item.type) || selectedSomeInCattegory(item.type)}" (click)="toggleselectedCategory($event, model, item.type);$event.stopPropagation()" >
+ <span class="checked-checkbox" *ngIf="selectedAllInCattegory(item.type)"></span>
+ <span class="line-checkbox" *ngIf="selectedSomeInCattegory(item.type)"></span>
+ </span>
+ {{labels[item.type] || item.type | titlecase}}
+ </a>
+ </li>
+
+ <li class="role-item" role="presentation" *ngIf="model && isOpenCategory[item.type] && item.type !== 'COMPUTATIONAL_SHAPE' && item.type !== 'NOTEBOOK_SHAPE'" >
+ <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+ <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+ <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+ </span>
+ {{item.role}}
+ </a>
+ </li>
+ <li class="role-item" role="presentation" (click)="toggleItemsForCloud(item.type + item.cloud, $event)"
+ *ngIf="model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.cloud !== items[i - 1].cloud
+ || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.type !== items[i - 1].type
+ || model && isOpenCategory[item.type] && item.type === 'NOTEBOOK_SHAPE' && item.cloud !== items[i - 1].cloud
+ || model && isOpenCategory[item.type] && item.type === 'COMPUTATIONAL_SHAPE' && item.type !== items[i - 1].type"
+ >
+ <a href="#" class="list-item" role="menuitem">
+ <span class="arrow" [ngClass]="{'rotate-arrow': isCloudOpen[item.type + item.cloud], 'arrow-checked': selectedAllInCloud(item.type, item.cloud) || selectedSomeInCloud(item.type, item.cloud)}"></span>
+ <span class="empty-checkbox" [ngClass]="{'checked': selectedAllInCloud(item.type, item.cloud) || selectedSomeInCloud(item.type, item.cloud)}" (click)="toggleSelectedCloud($event, model, item.type, item.cloud);$event.stopPropagation()" >
+ <span class="checked-checkbox" *ngIf="selectedAllInCloud(item.type, item.cloud)"></span>
+ <span class="line-checkbox" *ngIf="selectedSomeInCloud(item.type, item.cloud)"></span>
+ </span>
+ {{item.cloud || 'AWS'}}
+ </a>
+ </li>
+ <li class="role-cloud-item" role="presentation" *ngIf="model && isCloudOpen[item.type + item.cloud] && isOpenCategory[item.type]" >
+ <a href="#" class="list-item" role="menuitem" (click)="toggleSelectedOptions($event, model, item)">
+ <span class="empty-checkbox" [ngClass]="{'checked': checkInModel(item.role)}">
+ <span class="checked-checkbox" *ngIf="checkInModel(item.role)"></span>
+ </span>
+ {{item.role}}
+ </a>
+ </li>
+
+ </ng-template>
+
+ <li *ngIf="items?.length == 0">
+ <a role="menuitem" class="list-item">
+ <span class="material-icons">visibility_off</span>
+ No {{type}}
+ </a>
+ </li>
+ </ul>
+ </bubble-up>
+</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
new file mode 100644
index 0000000..5323a24
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.scss
@@ -0,0 +1,321 @@
+/*!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+.dropdown-list,
+.dropdown-multiselect {
+ width: 100%;
+ position: relative;
+}
+
+.dropdown-list button,
+.dropdown-multiselect button {
+ height: 38px;
+ width: 100%;
+ background: #fff;
+ padding-left: 15px;
+ font-size: 14px;
+ // height: 34px;
+ text-align: left;
+ white-space: nowrap;
+ cursor: pointer;
+ border-radius: 0;
+ border: none;
+ outline: none;
+ box-shadow: 0 3px 1px -2px rgba(0, 0, 0, .2), 0 2px 2px 0 rgba(0, 0, 0, .14), 0 1px 5px 0 rgba(0, 0, 0, .12);
+}
+
+.dropdown-list {
+ button {
+ line-height: 38px;
+
+ span {
+ color: #4a5c89;
+
+ em {
+ font-size: 13px;
+ color: #35afd5;
+ margin-right: 0px;
+ font-style: normal;
+ }
+ }
+ }
+}
+
+.dropdown-list button:active,
+.dropdown-list button:focus,
+.dropdown-multiselect button:active,
+.dropdown-multiselect button:focus {
+ box-shadow: 0 5px 5px -3px rgba(0, 0, 0, .2), 0 8px 10px 1px rgba(0, 0, 0, .14), 0 3px 14px 2px rgba(0, 0, 0, .12);
+}
+
+.dropdown-multiselect {
+ button {
+ span {
+ color: #999;
+ font-weight: 300;
+ display: inline-block;
+ max-width: 80%;
+ }
+
+ .selected-items {
+ color: #4a5c89;
+ max-width: 477px;
+ }
+ }
+}
+
+.selected-items strong {
+ font-weight: 300;
+}
+
+.dropdown-list,
+.dropdown-multiselect {
+ .caret-btn {
+ position: absolute;
+ top: 0;
+ right: 0;
+ width: 40px;
+ height: 100%;
+ text-align: center;
+ padding: 7px;
+ -webkit-appearance: none;
+ -moz-appearance: none;
+ border-left: 1px solid #ececec;
+ background: #fff;
+ color: #36afd5 !important;
+ }
+
+ .list-menu {
+ width: 100%;
+ max-height: 450px;
+ left: 0;
+ padding: 0;
+ margin: 0;
+ overflow-y: auto;
+ overflow-x: hidden;
+
+ li {
+ padding: 0;
+ margin: 0;
+ }
+ .role-item{
+ padding-left: 30px;
+ }
+ .role-cloud-item{
+ padding-left: 60px;
+ }
+
+ }
+
+ &.statuses {
+ .list-menu {
+ .list-item {
+ text-transform: capitalize;
+ }
+ }
+ }
+
+ &.resources {
+ .list-menu {
+ .list-item {
+ text-transform: capitalize;
+ }
+ }
+ }
+}
+
+.dropdown-list .list-menu a,
+.dropdown-multiselect .list-menu li a {
+ display: block;
+ padding: 10px;
+ padding-left: 15px;
+ position: relative;
+ font-weight: 300;
+ cursor: pointer;
+ color: #4a5c89;
+ text-decoration: none;
+}
+
+.dropdown-multiselect .list-menu li a {
+ padding-left: 45px;
+ transition: all .45s ease-in-out;
+}
+
+.dropdown-list .list-menu a:hover,
+.dropdown-multiselect .list-menu a:hover {
+ background: #f7f7f7;
+ color: #35afd5;
+}
+
+.dropdown-multiselect .list-menu .filter-actions {
+ display: flex;
+ cursor: pointer;
+ border-bottom: 1px solid #ececec;
+}
+
+.dropdown-multiselect .list-menu .filter-actions a {
+ width: 50%;
+ color: #35afd5;
+ display: block;
+ padding: 0;
+ line-height: 40px !important;
+ text-align: center;
+}
+
+.dropdown-list {
+
+ .list-menu,
+ .title {
+ span {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ font-weight: 300;
+ }
+ }
+}
+
+.dropdown-list .list-menu li span.caption {
+ display: block;
+ padding: 10px 15px;
+ cursor: default;
+}
+
+.dropdown-list .list-menu li i,
+.dropdown-list .list-menu li strong {
+ display: inline-block;
+ width: 30px;
+ text-align: center;
+ vertical-align: middle;
+ color: #35afd5;
+ line-height: 26px;
+}
+
+.dropdown-list .list-menu li i {
+ vertical-align: sub;
+ font-size: 18px;
+}
+
+.dropdown-list .list-menu a {
+ padding: 12px;
+ padding-left: 15px;
+ position: relative;
+ font-weight: 300;
+ cursor: pointer;
+
+ em {
+ font-size: 13px;
+ color: #35afd5;
+ margin-right: 0px;
+ font-style: normal;
+ }
+}
+
+.dropdown-list .list-menu a.empty {
+ height: 36px;
+}
+
+.dropdown-multiselect .list-menu .filter-actions i {
+ vertical-align: sub;
+ color: #35afd5;
+ font-size: 18px;
+ line-height: 26px;
+ transition: all .45s ease-in-out;
+}
+
+.dropdown-multiselect .list-menu .select_all:hover,
+.dropdown-multiselect .list-menu .select_all:hover i {
+ color: #4eaf3e !important;
+ background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu .deselect_all:hover,
+.dropdown-multiselect .list-menu .deselect_all:hover i {
+ color: #f1696e !important;
+ background: #f9fafb;
+}
+
+.dropdown-multiselect .list-menu a {
+ span {
+ position: absolute;
+ top: 10px;
+ left: 25px;
+ color: #35afd5;
+
+ &.checked-checkbox {
+ top: 0px;
+ left: 4px;
+ width: 5px;
+ height: 10px;
+ border-bottom: 2px solid white;
+ border-right: 2px solid white;
+ position: absolute;
+ transform: rotate(45deg);
+ }
+
+ &.line-checkbox {
+ top: 0px;
+ left: 2px;
+ width: 8px;
+ height: 7px;
+ border-bottom: 2px solid white;
+ }
+
+ &.arrow{
+ width: 16px;
+ height: 14px;
+ border: 8px solid transparent;
+ border-left: 8px solid lightgrey;
+ left: 10px;
+ top: 12px;
+ border-radius: 3px;
+
+ &.rotate-arrow{
+ transform: rotate(90deg);
+ transition: .1s ease-in-out;
+ top: 15px;
+ left: 6px;
+ }
+
+ &.arrow-checked{
+ border-left: 8px solid #35afd5;
+ }
+ }
+ }
+
+
+}
+
+.dropdown-multiselect.btn-group.open .dropdown-toggle {
+ box-shadow: none;
+}
+
+.empty-checkbox {
+ width: 16px;
+ height: 16px;
+ border-radius: 2px;
+ border: 2px solid lightgrey;
+ margin-top: 2px;
+ position: relative;
+ &.checked {
+ border-color: #35afd5;
+ background-color: #35afd5;
+ }
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
new file mode 100644
index 0000000..cabf7d9
--- /dev/null
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/form-controls/multi-level-select-dropdown/multi-level-select-dropdown.component.ts
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { Input, Output, Component, EventEmitter } from '@angular/core';
+
+@Component({
+ selector: 'multi-level-select-dropdown',
+ templateUrl: 'multi-level-select-dropdown.component.html',
+ styleUrls: ['multi-level-select-dropdown.component.scss']
+})
+
+export class MultiLevelSelectDropdownComponent {
+
+ @Input() items: Array<any>;
+ @Input() model: Array<any>;
+ @Input() type: string;
+ @Output() selectionChange: EventEmitter<{}> = new EventEmitter();
+
+ public isOpenCategory = {
+ };
+
+ public isCloudOpen = {
+
+ };
+
+ public labels = {
+ COMPUTATIONAL_SHAPE: 'Compute shapes',
+ NOTEBOOK_SHAPE: 'Notebook shapes',
+ COMPUTATIONAL: 'Compute'
+ };
+
+ toggleSelectedOptions($event, model, value) {
+ $event.preventDefault();
+ const currRole = model.filter(v => v.role === value.role).length;
+ currRole ? this.model = model.filter(v => v.role !== value.role) : model.push(value);
+ this.onUpdate($event);
+ }
+
+ toggleselectedCategory($event, model, value) {
+ $event.preventDefault();
+ const categoryItems = this.items.filter(role => role.type === value);
+ this.selectedAllInCattegory(value) ? this.model = this.model.filter(role => role.type !== value) : categoryItems.forEach(role => {
+ if (!model.filter(mod => mod.role === role.role).length) {this.model.push(role); }
+ });
+ this.onUpdate($event);
+ }
+
+ toggleSelectedCloud($event, model, category, cloud) {
+ $event.preventDefault();
+ const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+ this.selectedAllInCloud(category, cloud) ? this.model = this.model.filter(role => {
+ if (role.type === category && role.cloud === cloud) {
+ return false;
+ }
+ return true;
+ }) : categoryItems.forEach(role => {
+ if (!model.filter(mod => mod.role === role.role).length) {this.model.push(role); }
+ });
+ this.onUpdate($event);
+ }
+
+ selectAllOptions($event) {
+ $event.preventDefault();
+ this.model = [...this.items];
+ this.onUpdate($event);
+ $event.preventDefault();
+ }
+
+ deselectAllOptions($event) {
+ this.model = [];
+ this.onUpdate($event);
+ $event.preventDefault();
+ }
+
+ onUpdate($event): void {
+ this.selectionChange.emit({ model: this.model, type: this.type, $event });
+ }
+
+ public toggleItemsForLable(label, $event) {
+ this.isOpenCategory[label] = !this.isOpenCategory[label];
+ this.isCloudOpen[label + 'AWS'] = false;
+ this.isCloudOpen[label + 'GCP'] = false;
+ this.isCloudOpen[label + 'AZURE'] = false;
+ $event.preventDefault();
+ }
+
+ public toggleItemsForCloud(label, $event) {
+ this.isCloudOpen[label] = !this.isCloudOpen[label];
+ $event.preventDefault();
+ }
+
+ public selectedAllInCattegory(category) {
+ const selected = this.model.filter(role => role.type === category);
+ const categoryItems = this.items.filter(role => role.type === category);
+ return selected.length === categoryItems.length;
+ }
+
+ public selectedSomeInCattegory(category) {
+ const selected = this.model.filter(role => role.type === category);
+ const categoryItems = this.items.filter(role => role.type === category);
+ return selected.length && selected.length !== categoryItems.length;
+ }
+
+ public selectedAllInCloud(category, cloud) {
+ const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+ const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+ return selected.length === categoryItems.length;
+ }
+
+ public selectedSomeInCloud(category, cloud) {
+ const selected = this.model.filter(role => role.type === category && role.cloud === cloud);
+ const categoryItems = this.items.filter(role => role.type === category && role.cloud === cloud);
+ return selected.length && selected.length !== categoryItems.length;
+ }
+
+ public checkInModel(item) {
+ return this.model.filter(v => v.role === item).length;
+ }
+
+ public selectedRolesList() {
+ return this.model.map(role => role.role).join(',');
+ }
+}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
index 701f03f..ca05251 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog-type.enum.ts
@@ -21,5 +21,6 @@
StopExploratory = 0,
TerminateExploratory = 1,
TerminateComputationalResources = 2,
- StopEdgeNode = 3
+ StopEdgeNode = 3,
+ deleteUser = 4,
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
index 30155fe..9bb12e7 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.html
@@ -23,16 +23,22 @@
<span *ngIf="model.notebook.name && model.notebook.name !== 'edge node'">
<span>{{ confirmationType ? 'Terminate' : 'Stop' }} notebook: {{ model.notebook.name }}</span>
</span>
- <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node'">
+ <span *ngIf="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node' || data.type === 4 && data.notebook.length">
<i class="material-icons">priority_high</i>Warning
</span>
+ <span *ngIf="data.type === 4 && !data.notebook.length">
+ Update group data
+ </span>
</h4>
<button type="button" class="close" (click)="dialogRef.close()">×</button>
</header>
<div class="dialog-content">
<div class="content-box">
- <p class="info text-center">{{ model.title }}</p>
-
+ <p *ngIf="data.type !== 4" class="info text-center">{{ model.title }}</p>
+ <div *ngIf="data.type === 4" class="text-center m-bot-20">
+ <h3 class="strong">Group data will be updated.</h3>
+ </div>
+ <p *ngIf="data.type === 4 && data.notebook.length" class="text-center delete-user">User<span *ngIf="data.notebook.length>1">s</span> <span class="strong"> {{data.notebook.join(', ')}} </span>will be deleted from this group. All <span *ngIf="data.notebook.length===1">his</span><span *ngIf="data.notebook.length>1">their</span> resources authorized within this group will be terminated.</p>
<mat-list class="resources"
[hidden]="model.notebook.type === 'Edge Node' || model.notebook.name === 'edge node'
|| !model.notebook.resources || model.notebook.resources.length === 0 || (!isAliveResources && !confirmationType) || onlyKilled">
@@ -58,7 +64,8 @@
</div>
<div class="text-center m-top-20">
<button mat-raised-button type="button" class="butt action" (click)="dialogRef.close()">No</button>
- <button mat-raised-button type="button" class="butt butt-success action" (click)="confirm()">Yes</button>
+ <button *ngIf="data.type !== 4" mat-raised-button type="button" class="butt butt-success action" (click)="confirm()">Yes</button>
+ <button *ngIf="data.type === 4" mat-raised-button type="button" class="butt butt-success action" (click)="dialogRef.close(true)">Yes</button>
</div>
</div>
</div>
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
index 305c504..c71e2ed 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.component.scss
@@ -18,6 +18,9 @@
*/
.confirmation-dialog {
+ h3{
+ margin-bottom: 20px;
+ }
color: #718ba6;
p {
font-size: 14px;
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
index 41c31cc..1bfcd06 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/confirmation-dialog/confirmation-dialog.model.ts
@@ -71,19 +71,19 @@
private stopExploratory(): Observable<{}> {
return this.manageAction
- ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.name)
+ ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, this.notebook.name)
: this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'stop');
}
private terminateExploratory(): Observable<{}> {
return this.manageAction
- ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.name)
+ ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'terminate', this.notebook.project, this.notebook.name)
: this.userResourceService.suspendExploratoryEnvironment(this.notebook, 'terminate');
}
private stopEdgeNode(): Observable<{}> {
return this.manageAction
- ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', 'edge')
+ ? this.manageEnvironmentsService.environmentManagement(this.notebook.user, 'stop', this.notebook.project, 'edge')
: this.healthStatusService.suspendEdgeNode();
}
diff --git a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
index f0c7910..20ec20f 100644
--- a/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
+++ b/services/self-service/src/main/resources/webapp/src/app/shared/modal-dialog/notification-dialog/notification-dialog.component.ts
@@ -30,6 +30,13 @@
<button type="button" class="close" (click)="dialogRef.close()">×</button>
</header>
<div mat-dialog-content class="content message">
+ <div *ngIf="data.type === 'terminateNode'" class="table-header">
+ <div *ngIf="data.item.action.endpoint.length > 0">
+ Edge node<span *ngIf="data.item.action.endpoint.length>1">s</span>
+ <span class="strong">{{ ' ' + data.item.action.endpoint.join(', ') }}</span> in project
+ <span class="strong">{{ data.item.action.project_name }}</span> will be terminated.
+ </div>
+ </div>
<div *ngIf="data.type === 'list'" class="info">
<div *ngIf="data.template.notebook.length > 0">
Following notebook server<span *ngIf="data.template.notebook.length>1">s </span>
@@ -80,12 +87,12 @@
</div>
</div>
</div>
- <div class="confirm-resource-terminating">
- <label>
- <input class="checkbox" type="checkbox"
- (change)="terminateResource()"/>Do not terminate all related resources
- </label>
- </div>
+<!-- <div class="confirm-resource-terminating">-->
+<!-- <label>-->
+<!-- <input class="checkbox" type="checkbox"-->
+<!-- (change)="terminateResource()"/>Do not terminate all related resources-->
+<!-- </label>-->
+<!-- </div>-->
<p class="confirm-message">
<span *ngIf="!willNotTerminate">All connected computational resources will be terminated as well.</span>
</p>
@@ -102,8 +109,7 @@
</mat-list-item>
</div>
</mat-list>
- <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
-
+ <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
<div class="text-center m-top-30 m-bott-10">
<button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
<button *ngIf="!this.willNotTerminate" type="button" class="butt butt-success" mat-raised-button
@@ -114,6 +120,30 @@
</button>
</div>
</div>
+ <div class="confirm-dialog" *ngIf="data.type === 'terminateNode'">
+ <mat-list *ngIf="data.item.resources.length > 0">
+ <mat-list-item class="list-header sans">
+ <div class="endpoint">Resources</div>
+ <div class="status">Further status</div>
+ </mat-list-item>
+ <div class="scrolling-content">
+ <mat-list-item *ngFor="let resource of data.item.resources" class="sans node">
+ <div class="endpoint ellipsis">{{resource}}</div>
+ <div class="status terminated">Terminated</div>
+ </mat-list-item>
+ </div>
+ </mat-list>
+ <div mat-dialog-content class="bottom-message" *ngIf="data.item.resources.length > 0">
+ <span class="confirm-message">All connected computational resources will be terminated as well.</span>
+ </div>
+ <p class="m-top-20"><span class="strong">Do you want to proceed?</span></p>
+ <div class="text-center m-top-30 m-bott-10">
+ <button type="button" class="butt" mat-raised-button (click)="dialogRef.close()">No</button>
+ <button type="button" class="butt butt-success" mat-raised-button
+ (click)="dialogRef.close(true)">Yes
+ </button>
+ </div>
+ </div>
</div>
</div>
`,
@@ -137,9 +167,11 @@
.resource-list-header{display: flex; font-weight: 600; font-size: 16px;height: 48px; border-top: 1px solid #edf1f5; border-bottom: 1px solid #edf1f5; padding: 0 20px;}
.resource-list-row{display: flex; border-bottom: 1px solid #edf1f5;padding: 0 20px;}
.confirm-resource-terminating{text-align: left; padding: 10px 20px;}
- .confirm-message{color: #ef5c4b;font-size: 13px;min-height: 18px; text-align: center;}
+ .confirm-message{color: #ef5c4b;font-size: 13px;min-height: 18px; text-align: center; padding-top: 20px}
.checkbox{margin-right: 5px;vertical-align: middle; margin-bottom: 3px;}
label{cursor: pointer}
+ .bottom-message{padding-top: 15px;}
+ .table-header{padding-bottom: 10px;}
`]
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
index 1c602d0..595f36c 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_dialogs.scss
@@ -62,6 +62,7 @@
#dialog-box {
color: $modal-text-color;
+ min-height: 150px;
.dialog-header {
padding-left: 30px;
@@ -335,10 +336,16 @@
top: 49%;
}
+.confirmation-dialog p.delete-user{
+ font-weight: 500;
+ max-height: 200px;
+ overflow: auto;
+}
.disabled {
opacity: 0.4;
cursor: not-allowed;
pointer-events: none;
+
}
@media screen and (max-width: 1280px) {
diff --git a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
index 380aa75..7b48bba 100644
--- a/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
+++ b/services/self-service/src/main/resources/webapp/src/assets/styles/_theme.scss
@@ -615,10 +615,8 @@
.mat-table {
.header-row {
th.mat-header-cell {
- font-size: 15px;
font-family: 'Open Sans', sans-serif;
font-weight: 600;
- color: #607D8B;
}
.mat-cell {
@@ -658,3 +656,9 @@
background-color: #baf0f7;
}
}
+.manage-roles{
+ .mat-horizontal-content-container{
+ overflow: visible !important;
+ }
+}
+
diff --git a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
index be25a2c..b7d9abc 100644
--- a/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
+++ b/services/self-service/src/main/resources/webapp/src/dictionary/azure.dictionary.ts
@@ -34,16 +34,16 @@
'max_cluster_name_length': 10,
'billing': {
'resourceName': 'resourceName',
- 'cost': 'costString',
+ 'cost': 'cost',
'costTotal': 'cost_total',
'currencyCode': 'currencyCode',
'dateFrom': 'from',
'dateTo': 'to',
'service': 'meterCategory',
- 'service_filter_key': 'category',
+ 'service_filter_key': 'meterCategory',
'type': '',
'resourceType': 'resource_type',
- 'instance_size': 'shape',
+ 'instance_size': 'size',
'dlabId': 'dlabId'
},
'service': 'Category',
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
index a8b01fa..c257995 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/EnvironmentResourceTest.java
@@ -41,7 +41,14 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
public class EnvironmentResourceTest extends TestBase {
@@ -126,9 +133,9 @@
@Test
public void stopEnv() {
- doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
+ doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop")
+ .target("/environment/stop/projectName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -136,16 +143,16 @@
assertEquals(HttpStatus.SC_OK, response.getStatus());
assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER);
+ verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER, "projectName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void stopEnvWithFailedAuth() throws AuthenticationException {
authFailSetup();
- doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
+ doNothing().when(environmentService).stopEnvironment(any(UserInfo.class), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop")
+ .target("/environment/stop/projectName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -159,9 +166,9 @@
@Test
public void stopEnvWithResourceConflictException() {
doThrow(new ResourceConflictException("Can not stop environment because one of the user resources is in " +
- "status CREATING or STARTING")).when(environmentService).stopEnvironment(any(UserInfo.class), anyString());
+ "status CREATING or STARTING")).when(environmentService).stopEnvironment(any(UserInfo.class), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop")
+ .target("/environment/stop/projectName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -169,15 +176,15 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER);
+ verify(environmentService).stopEnvironment(new UserInfo(USER, TOKEN), USER, "projectName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void stopNotebook() {
- doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+ doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName")
+ .target("/environment/stop/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -185,16 +192,16 @@
assertEquals(HttpStatus.SC_OK, response.getStatus());
assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+ verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void stopNotebookWithFailedAuth() throws AuthenticationException {
authFailSetup();
- doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+ doNothing().when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName")
+ .target("/environment/stop/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -208,9 +215,9 @@
@Test
public void stopNotebookWithResourceConflictException() {
doThrow(new ResourceConflictException("Can not stop notebook because its status is CREATING or STARTING"))
- .when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString());
+ .when(environmentService).stopExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName")
+ .target("/environment/stop/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -218,15 +225,15 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+ verify(environmentService).stopExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void stopCluster() {
- doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName/compName")
+ .target("/environment/stop/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -234,16 +241,16 @@
assertEquals(HttpStatus.SC_OK, response.getStatus());
assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+ verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void stopClusterWithFailedAuth() throws AuthenticationException {
authFailSetup();
- doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ doNothing().when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName/compName")
+ .target("/environment/stop/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -257,9 +264,9 @@
@Test
public void stopClusterWithResourceConflictException() {
doThrow(new ResourceConflictException("Can not stop cluster because its status is CREATING or STARTING"))
- .when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ .when(environmentService).stopComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/stop/explName/compName")
+ .target("/environment/stop/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -267,15 +274,15 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+ verify(environmentService).stopComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void terminateNotebook() {
- doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+ doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName")
+ .target("/environment/terminate/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -283,16 +290,16 @@
assertEquals(HttpStatus.SC_OK, response.getStatus());
assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+ verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void terminateNotebookWithFailedAuth() throws AuthenticationException {
authFailSetup();
- doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+ doNothing().when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName")
+ .target("/environment/terminate/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -306,9 +313,9 @@
@Test
public void terminateNotebookWithResourceConflictException() {
doThrow(new ResourceConflictException("Can not terminate notebook because its status is CREATING or STARTING"))
- .when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString());
+ .when(environmentService).terminateExploratory(any(UserInfo.class), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName")
+ .target("/environment/terminate/projectName/explName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -316,15 +323,15 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "explName");
+ verify(environmentService).terminateExploratory(new UserInfo(USER, TOKEN), USER, "projectName", "explName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void terminateCluster() {
- doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName/compName")
+ .target("/environment/terminate/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -332,16 +339,16 @@
assertEquals(HttpStatus.SC_OK, response.getStatus());
assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+ verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
verifyNoMoreInteractions(environmentService);
}
@Test
public void terminateClusterWithFailedAuth() throws AuthenticationException {
authFailSetup();
- doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ doNothing().when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName/compName")
+ .target("/environment/terminate/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -355,9 +362,9 @@
@Test
public void terminateClusterWithResourceConflictException() {
doThrow(new ResourceConflictException("Can not terminate cluster because its status is CREATING or STARTING"))
- .when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
+ .when(environmentService).terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/environment/terminate/explName/compName")
+ .target("/environment/terminate/projectName/explName/compName")
.request()
.header("Authorization", "Bearer " + TOKEN)
.post(Entity.text(USER));
@@ -365,7 +372,7 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "explName", "compName");
+ verify(environmentService).terminateComputational(new UserInfo(USER, TOKEN), USER, "projectName", "explName", "compName");
verifyNoMoreInteractions(environmentService);
}
}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
index f6f4692..bccfa8b 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ExploratoryResourceTest.java
@@ -47,7 +47,14 @@
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
public class ExploratoryResourceTest extends TestBase {
@@ -154,9 +161,9 @@
@Test
public void stop() {
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/stop")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/stop")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -165,16 +172,16 @@
assertEquals("someUuid", response.readEntity(String.class));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).stop(getUserInfo(), "someName");
+ verify(exploratoryService).stop(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void stopWithFailedAuth() throws AuthenticationException {
authFailSetup();
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn("someUuid");
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/stop")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/stop")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -183,16 +190,16 @@
assertEquals("someUuid", response.readEntity(String.class));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).stop(getUserInfo(), "someName");
+ verify(exploratoryService).stop(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void stopWithException() {
doThrow(new DlabException("Could not stop exploratory environment"))
- .when(exploratoryService).stop(any(UserInfo.class), anyString());
+ .when(exploratoryService).stop(any(UserInfo.class), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/stop")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/stop")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -204,15 +211,15 @@
assertTrue(actualJson.contains(expectedJson));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).stop(getUserInfo(), "someName");
+ verify(exploratoryService).stop(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void terminate() {
- when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+ when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/terminate")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -221,16 +228,16 @@
assertEquals("someUuid", response.readEntity(String.class));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).terminate(getUserInfo(), "someName");
+ verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void terminateWithFailedAuth() throws AuthenticationException {
authFailSetup();
- when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn("someUuid");
+ when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/terminate")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -239,16 +246,16 @@
assertEquals("someUuid", response.readEntity(String.class));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).terminate(getUserInfo(), "someName");
+ verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void terminateWithException() {
doThrow(new DlabException("Could not terminate exploratory environment"))
- .when(exploratoryService).terminate(any(UserInfo.class), anyString());
+ .when(exploratoryService).terminate(any(UserInfo.class), anyString(), anyString());
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/terminate")
+ .target("/infrastructure_provision/exploratory_environment/project/someName/terminate")
.request()
.header("Authorization", "Bearer " + TOKEN)
.delete();
@@ -260,22 +267,22 @@
assertTrue(actualJson.contains(expectedJson));
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryService).terminate(getUserInfo(), "someName");
+ verify(exploratoryService).terminate(getUserInfo(), "project", "someName");
verifyNoMoreInteractions(exploratoryService);
}
@Test
public void updateSparkConfig() {
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/reconfigure")
+ .target("/infrastructure_provision/exploratory_environment/someProject/someName/reconfigure")
.request()
.header("Authorization", "Bearer " + TOKEN)
.put(Entity.json(Collections.singletonList(new ClusterConfig())));
assertEquals(HttpStatus.SC_OK, response.getStatus());
- verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someName"),
- eq(Collections.singletonList(new ClusterConfig())));
+ verify(exploratoryService).updateClusterConfig(refEq(getUserInfo()), eq("someProject"),
+ eq("someName"), eq(Collections.singletonList(new ClusterConfig())));
verifyNoMoreInteractions(exploratoryService);
}
@@ -283,9 +290,9 @@
public void getSparkConfig() {
final ClusterConfig config = new ClusterConfig();
config.setClassification("test");
- when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString())).thenReturn(Collections.singletonList(config));
+ when(exploratoryService.getClusterConfig(any(UserInfo.class), anyString(), anyString())).thenReturn(Collections.singletonList(config));
final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/someName/cluster/config")
+ .target("/infrastructure_provision/exploratory_environment/someProject/someName/cluster/config")
.request()
.header("Authorization", "Bearer " + TOKEN)
.get();
@@ -296,7 +303,7 @@
assertEquals(1, clusterConfigs.size());
assertEquals("test", clusterConfigs.get(0).getClassification());
- verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someName"));
+ verify(exploratoryService).getClusterConfig(refEq(getUserInfo()), eq("someProject"), eq("someName"));
verifyNoMoreInteractions(exploratoryService);
}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
index 2a7e4c0..38c0e46 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ImageExploratoryResourceTest.java
@@ -45,10 +45,16 @@
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
public class ImageExploratoryResourceTest extends TestBase {
-
+ private static final String PROJECT = "projectName";
private ImageExploratoryService imageExploratoryService = mock(ImageExploratoryService.class);
private RequestId requestId = mock(RequestId.class);
@@ -63,7 +69,7 @@
@Test
public void createImage() {
- when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+ when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
.thenReturn("someUuid");
when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
@@ -75,7 +81,7 @@
assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
+ verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName",
"someImageName", "someDescription");
verify(requestId).put(USER.toLowerCase(), "someUuid");
verifyNoMoreInteractions(imageExploratoryService, requestId);
@@ -84,7 +90,7 @@
@Test
public void createImageWithFailedAuth() throws AuthenticationException {
authFailSetup();
- when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString()))
+ when(imageExploratoryService.createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString()))
.thenReturn("someUuid");
when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
final Response response = resources.getJerseyTest()
@@ -96,8 +102,7 @@
assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
- "someImageName", "someDescription");
+ verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
verify(requestId).put(USER.toLowerCase(), "someUuid");
verifyNoMoreInteractions(imageExploratoryService, requestId);
}
@@ -105,7 +110,7 @@
@Test
public void createImageWithException() {
doThrow(new ResourceAlreadyExistException("Image with name is already exist"))
- .when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString());
+ .when(imageExploratoryService).createImage(any(UserInfo.class), anyString(), anyString(), anyString(), anyString());
final Response response = resources.getJerseyTest()
.target("/infrastructure_provision/exploratory_environment/image")
.request()
@@ -115,8 +120,7 @@
assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(imageExploratoryService).createImage(getUserInfo(), "someNotebookName",
- "someImageName", "someDescription");
+ verify(imageExploratoryService).createImage(getUserInfo(), PROJECT, "someNotebookName", "someImageName", "someDescription");
verifyNoMoreInteractions(imageExploratoryService);
verifyZeroInteractions(requestId);
}
@@ -263,6 +267,7 @@
private ExploratoryImageCreateFormDTO getExploratoryImageCreateFormDTO() {
ExploratoryImageCreateFormDTO eicfDto = new ExploratoryImageCreateFormDTO("someImageName", "someDescription");
eicfDto.setNotebookName("someNotebookName");
+ eicfDto.setProjectName(PROJECT);
return eicfDto;
}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
index 50f6763..c7f5ced 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/LibExploratoryResourceTest.java
@@ -22,7 +22,12 @@
import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.dao.ExploratoryDAO;
import com.epam.dlab.backendapi.domain.RequestId;
-import com.epam.dlab.backendapi.resources.dto.*;
+import com.epam.dlab.backendapi.resources.dto.LibInfoRecord;
+import com.epam.dlab.backendapi.resources.dto.LibInstallFormDTO;
+import com.epam.dlab.backendapi.resources.dto.LibKey;
+import com.epam.dlab.backendapi.resources.dto.LibraryDTO;
+import com.epam.dlab.backendapi.resources.dto.LibraryStatus;
+import com.epam.dlab.backendapi.resources.dto.SearchLibsFormDTO;
import com.epam.dlab.backendapi.service.ExternalLibraryService;
import com.epam.dlab.backendapi.service.LibraryService;
import com.epam.dlab.dto.UserInstanceDTO;
@@ -50,331 +55,355 @@
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
public class LibExploratoryResourceTest extends TestBase {
- private static final String LIB_GROUP = "group";
- private static final String LIB_NAME = "name";
- private static final String LIB_VERSION = "version";
- private static final String EXPLORATORY_NAME = "explName";
- private static final String COMPUTATIONAL_NAME = "compName";
- private static final String UUID = "uid";
- private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
- private LibraryService libraryService = mock(LibraryService.class);
- private RESTService provisioningService = mock(RESTService.class);
- private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
- private RequestId requestId = mock(RequestId.class);
+ private static final String LIB_GROUP = "group";
+ private static final String LIB_NAME = "name";
+ private static final String LIB_VERSION = "version";
+ private static final String EXPLORATORY_NAME = "explName";
+ private static final String PROJECT = "projectName";
+ private static final String COMPUTATIONAL_NAME = "compName";
+ private static final String UUID = "uid";
+ private ExploratoryDAO exploratoryDAO = mock(ExploratoryDAO.class);
+ private LibraryService libraryService = mock(LibraryService.class);
+ private RESTService provisioningService = mock(RESTService.class);
+ private ExternalLibraryService externalLibraryService = mock(ExternalLibraryService.class);
+ private RequestId requestId = mock(RequestId.class);
- @Rule
- public final ResourceTestRule resources = getResourceTestRuleInstance(
- new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
+ @Rule
+ public final ResourceTestRule resources = getResourceTestRuleInstance(
+ new LibExploratoryResource(exploratoryDAO, libraryService, externalLibraryService));
- @Before
- public void setup() throws AuthenticationException {
- authSetup();
- }
+ @Before
+ public void setup() throws AuthenticationException {
+ authSetup();
+ }
- @Test
- public void getLibGroupListWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
- (getUserInstanceDto());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_groups")
- .queryParam("exploratory_name", "explName")
- .queryParam("computational_name", "compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ @Test
+ public void getLibGroupListWithFailedAuth() throws AuthenticationException {
+ authFailSetup();
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+ .thenReturn(getUserInstanceDto());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_groups")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .queryParam("computational_name", "compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibGroupListWithException() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn
- (getUserInstanceDto());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_groups")
- .queryParam("exploratory_name", "explName")
- .queryParam("computational_name", "compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn
+ (getUserInstanceDto());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_groups")
+ .queryParam("project_name", "projectName")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("computational_name", "compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibGroupListWithoutComputationalWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_groups")
- .queryParam("exploratory_name", "explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ authFailSetup();
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_groups")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibGroupListWithoutComputationalWithException() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_groups")
- .queryParam("exploratory_name", "explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_groups")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibList() {
- when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list")
- .queryParam("exploratory_name", "explName")
- .queryParam("computational_name", "compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list")
+ .queryParam("project_name", "projectName")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("computational_name", "compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
- }));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+ }));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void getLibListWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(libraryService.getLibs(anyString(), anyString(), anyString())).thenReturn(getDocuments());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list")
- .queryParam("exploratory_name", "explName")
- .queryParam("computational_name", "compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ authFailSetup();
+ when(libraryService.getLibs(anyString(), anyString(), anyString(), anyString())).thenReturn(getDocuments());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list")
+ .queryParam("project_name", "projectName")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("computational_name", "compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
- }));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getDocuments(), response.readEntity(new GenericType<List<Document>>() {
+ }));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void getLibListWithException() {
- doThrow(new DlabException("Cannot load installed libraries"))
- .when(libraryService).getLibs(anyString(), anyString(), anyString());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list")
- .queryParam("exploratory_name", "explName")
- .queryParam("computational_name", "compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ doThrow(new DlabException("Cannot load installed libraries"))
+ .when(libraryService).getLibs(anyString(), anyString(), anyString(), anyString());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list")
+ .queryParam("project_name", "projectName")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("computational_name", "compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibs(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibs(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void getLibListFormatted() {
- when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
- .queryParam("exploratory_name", "explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void getLibListFormattedWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(libraryService.getLibInfo(anyString(), anyString())).thenReturn(getLibInfoRecords());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
- .queryParam("exploratory_name", "explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ authFailSetup();
+ when(libraryService.getLibInfo(anyString(), anyString(), anyString())).thenReturn(getLibInfoRecords());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void getLibListFormattedWithException() {
- doThrow(new DlabException("Cannot load formatted list of installed libraries"))
- .when(libraryService).getLibInfo(anyString(), anyString());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
- .queryParam("exploratory_name", "explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ doThrow(new DlabException("Cannot load formatted list of installed libraries"))
+ .when(libraryService).getLibInfo(anyString(), anyString(), anyString());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_list/formatted")
+ .queryParam("exploratory_name", "explName")
+ .queryParam("project_name", "projectName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(libraryService).getLibInfo(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(libraryService);
- }
+ verify(libraryService).getLibInfo(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(libraryService);
+ }
@Test
public void libInstall() {
- when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
- anyListOf(LibInstallDTO.class))).thenReturn(UUID);
- LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
- libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
- libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
- libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_install")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(libInstallFormDTO));
+ when(libraryService.installComputationalLibs(any(UserInfo.class), anyString(), anyString(),
+ anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
+ LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+ libInstallFormDTO.setComputationalName(COMPUTATIONAL_NAME);
+ libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+ libInstallFormDTO.setProject(PROJECT);
+ libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_install")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(libInstallFormDTO));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- assertEquals(UUID, response.readEntity(String.class));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(UUID, response.readEntity(String.class));
- verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
- eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
- verifyNoMoreInteractions(libraryService);
- verifyZeroInteractions(provisioningService, requestId);
- }
+ verify(libraryService).installComputationalLibs(refEq(getUserInfo()), eq(PROJECT),
+ eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+ verifyNoMoreInteractions(libraryService);
+ verifyZeroInteractions(provisioningService, requestId);
+ }
@Test
public void libInstallWithoutComputational() {
- when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
- LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
- libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
- libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/lib_install")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(libInstallFormDTO));
+ when(libraryService.installExploratoryLibs(any(UserInfo.class), anyString(), anyString(), anyListOf(LibInstallDTO.class))).thenReturn(UUID);
+ LibInstallFormDTO libInstallFormDTO = new LibInstallFormDTO();
+ libInstallFormDTO.setNotebookName(EXPLORATORY_NAME);
+ libInstallFormDTO.setLibs(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION)));
+ libInstallFormDTO.setProject(PROJECT);
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/lib_install")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(libInstallFormDTO));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- assertEquals(UUID, response.readEntity(String.class));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(UUID, response.readEntity(String.class));
- verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
- eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
- verifyNoMoreInteractions(libraryService);
- verifyZeroInteractions(provisioningService, requestId);
- }
+ verify(libraryService).installExploratoryLibs(refEq(getUserInfo()), eq(PROJECT),
+ eq(EXPLORATORY_NAME), eq(singletonList(new LibInstallDTO(LIB_GROUP, LIB_NAME, LIB_VERSION))));
+ verifyNoMoreInteractions(libraryService);
+ verifyZeroInteractions(provisioningService, requestId);
+ }
@Test
public void getLibraryListWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
- .thenReturn(getUserInstanceDto());
- SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
- searchLibsFormDTO.setComputationalName("compName");
- searchLibsFormDTO.setNotebookName("explName");
- searchLibsFormDTO.setGroup("someGroup");
- searchLibsFormDTO.setStartWith("someText");
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/search/lib_list")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(searchLibsFormDTO));
+ authFailSetup();
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+ .thenReturn(getUserInstanceDto());
+ SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+ searchLibsFormDTO.setComputationalName("compName");
+ searchLibsFormDTO.setNotebookName("explName");
+ searchLibsFormDTO.setGroup("someGroup");
+ searchLibsFormDTO.setStartWith("someText");
+ searchLibsFormDTO.setProjectName("projectName");
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(searchLibsFormDTO));
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibraryListWithException() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
- .thenReturn(getUserInstanceDto());
- SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
- searchLibsFormDTO.setComputationalName("compName");
- searchLibsFormDTO.setNotebookName("explName");
- searchLibsFormDTO.setGroup("someGroup");
- searchLibsFormDTO.setStartWith("someText");
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/search/lib_list")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(searchLibsFormDTO));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString()))
+ .thenReturn(getUserInstanceDto());
+ SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+ searchLibsFormDTO.setComputationalName("compName");
+ searchLibsFormDTO.setNotebookName("explName");
+ searchLibsFormDTO.setGroup("someGroup");
+ searchLibsFormDTO.setStartWith("someText");
+ searchLibsFormDTO.setProjectName("projectName");
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(searchLibsFormDTO));
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName", "compName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName", "compName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getLibraryListWithoutComputationalWithException() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString()))
- .thenReturn(getUserInstanceDto());
- SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
- searchLibsFormDTO.setComputationalName("");
- searchLibsFormDTO.setNotebookName("explName");
- searchLibsFormDTO.setGroup("someGroup");
- searchLibsFormDTO.setStartWith("someText");
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/search/lib_list")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(searchLibsFormDTO));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString()))
+ .thenReturn(getUserInstanceDto());
+ SearchLibsFormDTO searchLibsFormDTO = new SearchLibsFormDTO();
+ searchLibsFormDTO.setComputationalName("");
+ searchLibsFormDTO.setNotebookName("explName");
+ searchLibsFormDTO.setGroup("someGroup");
+ searchLibsFormDTO.setStartWith("someText");
+ searchLibsFormDTO.setProjectName("projectName");
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/search/lib_list")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(searchLibsFormDTO));
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getMavenArtifact() {
@@ -422,11 +451,14 @@
}
private UserInstanceDTO getUserInstanceDto() {
- UserComputationalResource ucResource = new UserComputationalResource();
- ucResource.setComputationalName("compName");
- return new UserInstanceDTO().withUser(USER).withExploratoryName("explName")
- .withResources(singletonList(ucResource));
- }
+ UserComputationalResource ucResource = new UserComputationalResource();
+ ucResource.setComputationalName("compName");
+ return new UserInstanceDTO()
+ .withUser(USER)
+ .withExploratoryName("explName")
+ .withProject(PROJECT)
+ .withResources(singletonList(ucResource));
+ }
private List<Document> getDocuments() {
return singletonList(new Document());
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
index c1df4c7..b85f631 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/ProjectResourceTest.java
@@ -2,6 +2,7 @@
import com.epam.dlab.auth.UserInfo;
import com.epam.dlab.backendapi.resources.dto.KeysDTO;
+import com.epam.dlab.backendapi.resources.dto.ProjectActionFormDTO;
import com.epam.dlab.backendapi.service.AccessKeyService;
import com.epam.dlab.backendapi.service.ProjectService;
import com.epam.dlab.exceptions.DlabException;
@@ -17,11 +18,14 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+import java.util.Collections;
+
import static org.junit.Assert.*;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
import static org.mockito.Mockito.verifyNoMoreInteractions;
+
public class ProjectResourceTest extends TestBase {
private ProjectService projectService = mock(ProjectService.class);
private AccessKeyService keyService = mock(AccessKeyService.class);
@@ -30,7 +34,6 @@
public final ResourceTestRule resources = getResourceTestRuleInstance(
new ProjectResource(projectService, keyService));
-
@Before
public void setup() throws AuthenticationException {
authSetup();
@@ -51,15 +54,28 @@
}
@Test
- public void stopProjectWithResources() {
+ public void stopProject() {
final Response response = resources.getJerseyTest()
- .target("project/managing/stop/" + "projectName")
+ .target("project/stop")
.request()
.header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(""));
+ .post(Entity.json(getProjectActionDTO()));
assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
- verify(projectService).stopWithResources(any(UserInfo.class), anyString());
+ verify(projectService).stopWithResources(any(UserInfo.class), anyList(), anyString());
+ verifyNoMoreInteractions(projectService);
+ }
+
+ @Test
+ public void startProject() {
+ final Response response = resources.getJerseyTest()
+ .target("project/start")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getProjectActionDTO()));
+
+ assertEquals(HttpStatus.SC_ACCEPTED, response.getStatus());
+ verify(projectService).start(any(UserInfo.class), anyList(), anyString());
verifyNoMoreInteractions(projectService);
}
@@ -98,4 +114,8 @@
verify(keyService).generateKeys(getUserInfo());
verifyNoMoreInteractions(keyService);
}
-}
\ No newline at end of file
+
+ private ProjectActionFormDTO getProjectActionDTO() {
+ return new ProjectActionFormDTO("DLAB", Collections.singletonList("https://localhost:8083/"));
+ }
+}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
index 08e601e..c763238 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/resources/SchedulerJobResourceTest.java
@@ -36,7 +36,12 @@
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.Collections;
@@ -46,7 +51,13 @@
import static org.junit.Assert.assertNull;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
public class SchedulerJobResourceTest extends TestBase {
@@ -63,228 +74,228 @@
@Test
public void updateExploratoryScheduler() {
- doNothing().when(schedulerJobService)
- .updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(getSchedulerJobDTO()));
+ doNothing().when(schedulerJobService)
+ .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
- "explName", getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+ "explName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void updateExploratorySchedulerWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- doNothing().when(schedulerJobService)
- .updateExploratorySchedulerData(anyString(), anyString(), any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", String.join(" ", "Bearer", TOKEN))
- .post(Entity.json(getSchedulerJobDTO()));
+ authFailSetup();
+ doNothing().when(schedulerJobService)
+ .updateExploratorySchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", String.join(" ", "Bearer", TOKEN))
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(),
- "explName", getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+ "explName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void updateExploratorySchedulerWithException() {
- doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
- .when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(),
- any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(getSchedulerJobDTO()));
+ doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+ .when(schedulerJobService).updateExploratorySchedulerData(anyString(), anyString(), anyString(),
+ any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "explName",
- getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateExploratorySchedulerData(USER.toLowerCase(), "projectName",
+ "explName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void upsertComputationalScheduler() {
- doNothing().when(schedulerJobService)
- .updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(getSchedulerJobDTO()));
+ doNothing().when(schedulerJobService)
+ .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
- "compName", getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+ "explName", "compName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void upsertComputationalSchedulerWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- doNothing().when(schedulerJobService)
- .updateComputationalSchedulerData(anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(getSchedulerJobDTO()));
+ authFailSetup();
+ doNothing().when(schedulerJobService)
+ .updateComputationalSchedulerData(anyString(), anyString(), anyString(), anyString(), any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertNull(response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
- "compName", getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+ "explName", "compName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void upsertComputationalSchedulerWithException() {
- doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
- .when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
- any(SchedulerJobDTO.class));
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .post(Entity.json(getSchedulerJobDTO()));
+ doThrow(new ResourceInappropriateStateException("Can't create/update scheduler for user instance with status"))
+ .when(schedulerJobService).updateComputationalSchedulerData(anyString(), anyString(), anyString(),
+ anyString(), any(SchedulerJobDTO.class));
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .post(Entity.json(getSchedulerJobDTO()));
- assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_INTERNAL_SERVER_ERROR, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "explName",
- "compName", getSchedulerJobDTO());
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).updateComputationalSchedulerData(USER.toLowerCase(), "projectName",
+ "explName", "compName", getSchedulerJobDTO());
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForUserAndExploratory() {
- when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
- .thenReturn(getSchedulerJobDTO());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+ .thenReturn(getSchedulerJobDTO());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForUserAndExploratoryWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString()))
- .thenReturn(getSchedulerJobDTO());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ authFailSetup();
+ when(schedulerJobService.fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString()))
+ .thenReturn(getSchedulerJobDTO());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForUserAndExploratoryWithException() {
- doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
- .when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory"))
+ .when(schedulerJobService).fetchSchedulerJobForUserAndExploratory(anyString(), anyString(), anyString());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "explName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForUserAndExploratory(USER.toLowerCase(), "projectName", "explName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForComputationalResource() {
- when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
- .thenReturn(getSchedulerJobDTO());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+ .thenReturn(getSchedulerJobDTO());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
- "explName", "compName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+ "explName", "compName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForComputationalResourceWithFailedAuth() throws AuthenticationException {
- authFailSetup();
- when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString()))
- .thenReturn(getSchedulerJobDTO());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ authFailSetup();
+ when(schedulerJobService.fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString()))
+ .thenReturn(getSchedulerJobDTO());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_OK, response.getStatus());
- assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_OK, response.getStatus());
+ assertEquals(getSchedulerJobDTO(), response.readEntity(SchedulerJobDTO.class));
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
- "explName", "compName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+ "explName", "compName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void fetchSchedulerJobForComputationalResourceWithException() {
- doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
- "computational resource")).when(schedulerJobService)
- .fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString());
- final Response response = resources.getJerseyTest()
- .target("/infrastructure_provision/exploratory_environment/scheduler/explName/compName")
- .request()
- .header("Authorization", "Bearer " + TOKEN)
- .get();
+ doThrow(new ResourceNotFoundException("Scheduler job data not found for user with exploratory with " +
+ "computational resource")).when(schedulerJobService)
+ .fetchSchedulerJobForComputationalResource(anyString(), anyString(), anyString(), anyString());
+ final Response response = resources.getJerseyTest()
+ .target("/infrastructure_provision/exploratory_environment/scheduler/projectName/explName/compName")
+ .request()
+ .header("Authorization", "Bearer " + TOKEN)
+ .get();
- assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
- assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
+ assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatus());
+ assertEquals(MediaType.APPLICATION_JSON, response.getHeaderString(HttpHeaders.CONTENT_TYPE));
- verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(),
- "explName", "compName");
- verifyNoMoreInteractions(schedulerJobService);
- }
+ verify(schedulerJobService).fetchSchedulerJobForComputationalResource(USER.toLowerCase(), "projectName",
+ "explName", "compName");
+ verifyNoMoreInteractions(schedulerJobService);
+ }
@Test
public void testGetActiveSchedulers() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
index 8331235..74fc7f0 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ComputationalServiceImplTest.java
@@ -38,10 +38,15 @@
import com.epam.dlab.dto.UserInstanceDTO;
import com.epam.dlab.dto.UserInstanceStatus;
import com.epam.dlab.dto.aws.computational.ClusterConfig;
-import com.epam.dlab.dto.base.DataEngineType;
import com.epam.dlab.dto.base.computational.ComputationalBase;
import com.epam.dlab.dto.base.edge.EdgeInfo;
-import com.epam.dlab.dto.computational.*;
+import com.epam.dlab.dto.computational.ComputationalClusterConfigDTO;
+import com.epam.dlab.dto.computational.ComputationalStartDTO;
+import com.epam.dlab.dto.computational.ComputationalStatusDTO;
+import com.epam.dlab.dto.computational.ComputationalStopDTO;
+import com.epam.dlab.dto.computational.ComputationalTerminateDTO;
+import com.epam.dlab.dto.computational.SparkStandaloneClusterResource;
+import com.epam.dlab.dto.computational.UserComputationalResource;
import com.epam.dlab.exceptions.DlabException;
import com.epam.dlab.exceptions.ResourceNotFoundException;
import com.epam.dlab.rest.client.RESTService;
@@ -62,42 +67,60 @@
import java.util.List;
import java.util.Optional;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.CREATING;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class ComputationalServiceImplTest {
- private static final long MAX_INACTIVITY = 10L;
- private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
- private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
- private static final String COMP_ID = "compId";
- private final String USER = "test";
- private final String TOKEN = "token";
- private final String EXPLORATORY_NAME = "expName";
- private final String COMP_NAME = "compName";
- private final String UUID = "1234-56789765-4321";
- private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
+ private static final long MAX_INACTIVITY = 10L;
+ private static final String DOCKER_DLAB_DATAENGINE = "docker.dlab-dataengine";
+ private static final String DOCKER_DLAB_DATAENGINE_SERVICE = "docker.dlab-dataengine-service";
+ private static final String COMP_ID = "compId";
+ private final String USER = "test";
+ private final String TOKEN = "token";
+ private final String EXPLORATORY_NAME = "expName";
+ private final String PROJECT = "project";
+ private final String COMP_NAME = "compName";
+ private final String UUID = "1234-56789765-4321";
+ private final LocalDateTime LAST_ACTIVITY = LocalDateTime.now().minusMinutes(MAX_INACTIVITY);
- private UserInfo userInfo;
- private List<ComputationalCreateFormDTO> formList;
- private UserInstanceDTO userInstance;
- private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
- private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
- private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
- private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
- private SparkStandaloneClusterResource sparkClusterResource;
- private UserComputationalResource ucResource;
+ private UserInfo userInfo;
+ private List<ComputationalCreateFormDTO> formList;
+ private UserInstanceDTO userInstance;
+ private ComputationalStatusDTO computationalStatusDTOWithStatusTerminating;
+ private ComputationalStatusDTO computationalStatusDTOWithStatusFailed;
+ private ComputationalStatusDTO computationalStatusDTOWithStatusStopping;
+ private ComputationalStatusDTO computationalStatusDTOWithStatusStarting;
+ private SparkStandaloneClusterResource sparkClusterResource;
+ private UserComputationalResource ucResource;
- @Mock
- private ProjectService projectService;
- @Mock
- private ExploratoryDAO exploratoryDAO;
- @Mock
- private ComputationalDAO computationalDAO;
+ @Mock
+ private ProjectService projectService;
+ @Mock
+ private ExploratoryDAO exploratoryDAO;
+ @Mock
+ private ComputationalDAO computationalDAO;
@Mock
private RESTService provisioningService;
@Mock
@@ -132,149 +155,148 @@
@Test
public void createSparkCluster() {
- ProjectDTO projectDTO = getProjectDTO();
- when(projectService.get(anyString())).thenReturn(projectDTO);
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(computationalDAO.addComputational(anyString(), anyString(),
- any(SparkStandaloneClusterResource.class))).thenReturn(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ ProjectDTO projectDTO = getProjectDTO();
+ when(projectService.get(anyString())).thenReturn(projectDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+ any(SparkStandaloneClusterResource.class))).thenReturn(true);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- ComputationalBase compBaseMocked = mock(ComputationalBase.class);
- when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
- any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class)))
- .thenReturn(compBaseMocked);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+ when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+ any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class)))
+ .thenReturn(compBaseMocked);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
- boolean creationResult =
- computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
- assertTrue(creationResult);
+ SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+ boolean creationResult =
+ computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+ assertTrue(creationResult);
- verify(projectService).get("");
- verify(computationalDAO)
- .addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
+ verify(projectService).get(PROJECT);
+ verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newComputationalCreate(
- refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(sparkClusterCreateForm), refEq(endpointDTO()));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newComputationalCreate(
+ refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(sparkClusterCreateForm), refEq(endpointDTO()));
- verify(provisioningService)
- .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
- String.class);
+ verify(provisioningService)
+ .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_SPARK, TOKEN, compBaseMocked,
+ String.class);
- verify(requestId).put(USER, UUID);
- verifyNoMoreInteractions(projectService, configuration, computationalDAO, requestBuilder, provisioningService, requestId);
- }
- @Test
- public void createSparkClusterWhenResourceAlreadyExists() {
- when(computationalDAO.addComputational(anyString(), anyString(),
- any(SparkStandaloneClusterResource.class))).thenReturn(false);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ verify(requestId).put(USER, UUID);
+ verifyNoMoreInteractions(projectService, configuration, computationalDAO, requestBuilder, provisioningService, requestId);
+ }
+
+ @Test
+ public void createSparkClusterWhenResourceAlreadyExists() {
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+ any(SparkStandaloneClusterResource.class))).thenReturn(false);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- boolean creationResult =
- computationalService.createSparkCluster(userInfo, (SparkStandaloneClusterCreateForm) formList.get(0),
- "");
- assertFalse(creationResult);
+ boolean creationResult = computationalService.createSparkCluster(userInfo, (SparkStandaloneClusterCreateForm) formList.get(0),
+ PROJECT);
+ assertFalse(creationResult);
- verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(sparkClusterResource));
- verifyNoMoreInteractions(configuration, computationalDAO);
- }
+ verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(sparkClusterResource));
+ verifyNoMoreInteractions(configuration, computationalDAO);
+ }
@Test
public void createSparkClusterWhenMethodFetchExploratoryFieldsThrowsException() {
- when(computationalDAO.addComputational(anyString(), anyString(),
- any(SparkStandaloneClusterResource.class))).thenReturn(true);
- doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+ any(SparkStandaloneClusterResource.class))).thenReturn(true);
+ doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
- try {
- computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
- } catch (ResourceNotFoundException e) {
- assertEquals("Exploratory for user with name not found", e.getMessage());
- }
+ SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+ try {
+ computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+ } catch (ResourceNotFoundException e) {
+ assertEquals("Exploratory for user with name not found", e.getMessage());
+ }
- verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
- verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
- "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
- }
+ verify(computationalDAO, never()).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+ verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+ "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(configuration, computationalDAO, exploratoryDAO);
+ }
@Test
public void createSparkClusterWhenMethodNewComputationalCreateThrowsException() {
- ProjectDTO projectDTO = getProjectDTO();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(projectService.get(anyString())).thenReturn(projectDTO);
- when(computationalDAO.addComputational(anyString(), anyString(),
- any(SparkStandaloneClusterResource.class))).thenReturn(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ ProjectDTO projectDTO = getProjectDTO();
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(projectService.get(anyString())).thenReturn(projectDTO);
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(),
+ any(SparkStandaloneClusterResource.class))).thenReturn(true);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- doThrow(new DlabException("Cannot create instance of resource class "))
- .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
- any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class));
+ doThrow(new DlabException("Cannot create instance of resource class "))
+ .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+ any(UserInstanceDTO.class), any(SparkStandaloneClusterCreateForm.class), any(EndpointDTO.class));
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
- try {
- computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, "");
- } catch (DlabException e) {
- assertEquals("Cannot create instance of resource class ", e.getMessage());
- }
- verify(projectService).get("");
- verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, sparkClusterResource);
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newComputationalCreate(userInfo, projectDTO, userInstance, sparkClusterCreateForm, endpointDTO());
- verifyNoMoreInteractions(projectService, configuration, computationalDAO, exploratoryDAO, requestBuilder);
- }
+ SparkStandaloneClusterCreateForm sparkClusterCreateForm = (SparkStandaloneClusterCreateForm) formList.get(0);
+ try {
+ computationalService.createSparkCluster(userInfo, sparkClusterCreateForm, PROJECT);
+ } catch (DlabException e) {
+ assertEquals("Cannot create instance of resource class ", e.getMessage());
+ }
+ verify(projectService).get(PROJECT);
+ verify(computationalDAO).addComputational(USER, EXPLORATORY_NAME, PROJECT, sparkClusterResource);
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newComputationalCreate(userInfo, projectDTO, userInstance, sparkClusterCreateForm, endpointDTO());
+ verifyNoMoreInteractions(projectService, configuration, computationalDAO, exploratoryDAO, requestBuilder);
+ }
@Test
public void terminateComputationalEnvironment() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
- String explId = "explId";
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
+ String explId = "explId";
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- String compId = "compId";
- UserComputationalResource ucResource = new UserComputationalResource();
- ucResource.setComputationalName(COMP_NAME);
- ucResource.setImageName("dataengine-service");
- ucResource.setComputationalId(compId);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+ String compId = "compId";
+ UserComputationalResource ucResource = new UserComputationalResource();
+ ucResource.setComputationalName(COMP_NAME);
+ ucResource.setImageName("dataengine-service");
+ ucResource.setComputationalId(compId);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
- ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
- ctDto.setComputationalName(COMP_NAME);
- ctDto.setExploratoryName(EXPLORATORY_NAME);
- when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), any(EndpointDTO.class))).thenReturn(ctDto);
+ ComputationalTerminateDTO ctDto = new ComputationalTerminateDTO();
+ ctDto.setComputationalName(COMP_NAME);
+ ctDto.setExploratoryName(EXPLORATORY_NAME);
+ when(requestBuilder.newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), any(EndpointDTO.class))).thenReturn(ctDto);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
- .thenReturn(UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalTerminateDTO.class), any()))
+ .thenReturn(UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
+ computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
- verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
+ verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
- verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
- String.class);
+ verify(provisioningService).post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_TERMINATE_CLOUD_SPECIFIC, TOKEN, ctDto,
+ String.class);
- verify(requestId).put(USER, UUID);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
- }
+ verify(requestId).put(USER, UUID);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+ }
@Test
public void terminateComputationalEnvironmentWhenMethodUpdateComputationalStatusThrowsException() {
@@ -285,11 +307,11 @@
when(computationalDAO.updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self")))
.thenReturn(mock(UpdateResult.class));
- try {
- computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
- } catch (DlabException e) {
- assertEquals("Could not update computational resource status", e.getMessage());
- }
+ try {
+ computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ } catch (DlabException e) {
+ assertEquals("Could not update computational resource status", e.getMessage());
+ }
verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
@@ -298,400 +320,384 @@
@Test
public void terminateComputationalEnvironmentWhenMethodFetchComputationalFieldsThrowsException() {
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
- String explId = "explId";
- when(exploratoryDAO.fetchExploratoryId(anyString(), anyString())).thenReturn(explId);
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- doThrow(new DlabException("Computational resource for user with exploratory name not found."))
- .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ doThrow(new DlabException("Computational resource for user with exploratory name not found."))
+ .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- try {
- computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
- } catch (DlabException e) {
- assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
- }
+ try {
+ computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ } catch (DlabException e) {
+ assertEquals("Computational resource for user with exploratory name not found.", e.getMessage());
+ }
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
- }
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+ }
@Test
public void terminateComputationalEnvironmentWhenMethodNewComputationalTerminateThrowsException() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- String compId = "compId";
- UserComputationalResource ucResource = new UserComputationalResource();
- ucResource.setComputationalName(COMP_NAME);
- ucResource.setImageName("dataengine-service");
- ucResource.setComputationalId(compId);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+ String compId = "compId";
+ UserComputationalResource ucResource = new UserComputationalResource();
+ ucResource.setComputationalName(COMP_NAME);
+ ucResource.setImageName("dataengine-service");
+ ucResource.setComputationalId(compId);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
- doThrow(new DlabException("Cannot create instance of resource class "))
- .when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), any(EndpointDTO.class));
+ doThrow(new DlabException("Cannot create instance of resource class "))
+ .when(requestBuilder).newComputationalTerminate(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), any(EndpointDTO.class));
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- try {
- computationalService.terminateComputational(userInfo, EXPLORATORY_NAME, COMP_NAME);
- } catch (DlabException e) {
- assertEquals("Cannot create instance of resource class ", e.getMessage());
- }
+ try {
+ computationalService.terminateComputational(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ } catch (DlabException e) {
+ assertEquals("Cannot create instance of resource class ", e.getMessage());
+ }
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusTerminating, "self"));
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
- }
+ verify(requestBuilder).newComputationalTerminate(userInfo, userInstance, ucResource, endpointDTO());
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder);
+ }
@Test
public void createDataEngineService() {
- ProjectDTO projectDTO = getProjectDTO();
- when(projectService.get(anyString())).thenReturn(projectDTO);
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
- .thenReturn(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ ProjectDTO projectDTO = getProjectDTO();
+ when(projectService.get(anyString())).thenReturn(projectDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+ .thenReturn(true);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- ComputationalBase compBaseMocked = mock(ComputationalBase.class);
- when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
- any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class)))
- .thenReturn(compBaseMocked);
+ ComputationalBase compBaseMocked = mock(ComputationalBase.class);
+ when(requestBuilder.newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+ any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class)))
+ .thenReturn(compBaseMocked);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any())).thenReturn(UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- boolean creationResult =
- computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
- assertTrue(creationResult);
+ boolean creationResult =
+ computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+ assertTrue(creationResult);
- verify(projectService).get("");
+ verify(projectService).get(PROJECT);
- verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+ verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- verify(requestBuilder).newComputationalCreate(
- refEq(userInfo), refEq(projectDTO), refEq(userInstance), any(ComputationalCreateFormDTO.class), refEq(endpointDTO()));
+ verify(requestBuilder).newComputationalCreate(
+ refEq(userInfo), refEq(projectDTO), refEq(userInstance), any(ComputationalCreateFormDTO.class), refEq(endpointDTO()));
- verify(provisioningService)
- .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
- compBaseMocked, String.class);
+ verify(provisioningService)
+ .post(endpointDTO().getUrl() + ComputationalAPI.COMPUTATIONAL_CREATE_CLOUD_SPECIFIC, TOKEN,
+ compBaseMocked, String.class);
- verify(requestId).put(USER, UUID);
- verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
- }
+ verify(requestId).put(USER, UUID);
+ verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder, provisioningService, requestId);
+ }
@Test
public void createDataEngineServiceWhenComputationalResourceNotAdded() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
- .thenReturn(false);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(computationalDAO.addComputational(anyString(), anyString(), any(), any(UserComputationalResource.class)))
+ .thenReturn(false);
- boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
- "");
- assertFalse(creationResult);
+ boolean creationResult = computationalService.createDataEngineService(userInfo, formList.get(1), ucResource,
+ PROJECT);
+ assertFalse(creationResult);
- verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
- verifyNoMoreInteractions(computationalDAO);
- }
+ verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+ verifyNoMoreInteractions(computationalDAO);
+ }
@Test
public void createDataEngineServiceWhenMethodFetchExploratoryFieldsThrowsException() {
- when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
- .thenReturn(true);
- doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+ when(computationalDAO.addComputational(anyString(), anyString(), anyString(), any(UserComputationalResource.class)))
+ .thenReturn(true);
+ doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- try {
- computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, "");
- } catch (DlabException e) {
- assertEquals("Exploratory for user with name not found", e.getMessage());
- }
+ try {
+ computationalService.createDataEngineService(userInfo, formList.get(1), ucResource, PROJECT);
+ } catch (DlabException e) {
+ assertEquals("Exploratory for user with name not found", e.getMessage());
+ }
- verify(computationalDAO, never())
- .addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
+ verify(computationalDAO, never())
+ .addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
- "self"));
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
- }
+ verify(computationalDAO, never()).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed,
+ "self"));
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO);
+ }
@Test
public void createDataEngineServiceWhenMethodNewComputationalCreateThrowsException() {
- ProjectDTO projectDTO = getProjectDTO();
- when(projectService.get(anyString())).thenReturn(projectDTO);
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(computationalDAO.addComputational(anyString(), anyString(), any(UserComputationalResource.class)))
- .thenReturn(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ ProjectDTO projectDTO = getProjectDTO();
+ when(projectService.get(anyString())).thenReturn(projectDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(computationalDAO.addComputational(anyString(), anyString(), any(), any(UserComputationalResource.class)))
+ .thenReturn(true);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- doThrow(new DlabException("Cannot create instance of resource class "))
- .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
- any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class));
+ doThrow(new DlabException("Cannot create instance of resource class "))
+ .when(requestBuilder).newComputationalCreate(any(UserInfo.class), any(ProjectDTO.class),
+ any(UserInstanceDTO.class), any(ComputationalCreateFormDTO.class), any(EndpointDTO.class));
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
- try {
- computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, "");
- } catch (DlabException e) {
- assertEquals("Could not send request for creation the computational resource compName: " +
- "Cannot create instance of resource class ", e.getMessage());
- }
+ ComputationalCreateFormDTO computationalCreateFormDTO = formList.get(1);
+ try {
+ computationalService.createDataEngineService(userInfo, computationalCreateFormDTO, ucResource, PROJECT);
+ } catch (DlabException e) {
+ assertEquals("Could not send request for creation the computational resource compName: " +
+ "Cannot create instance of resource class ", e.getMessage());
+ }
- verify(projectService).get("");
- verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), refEq(ucResource));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newComputationalCreate(
- refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(computationalCreateFormDTO), refEq(endpointDTO()));
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
+ verify(projectService).get(PROJECT);
+ verify(computationalDAO).addComputational(eq(USER), eq(EXPLORATORY_NAME), eq(PROJECT), refEq(ucResource));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newComputationalCreate(
+ refEq(userInfo), refEq(projectDTO), refEq(userInstance), refEq(computationalCreateFormDTO), refEq(endpointDTO()));
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusFailed, "self"));
- verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder);
- }
+ verifyNoMoreInteractions(projectService, computationalDAO, exploratoryDAO, requestBuilder);
+ }
@Test
public void stopSparkCluster() {
- final UserInstanceDTO exploratory = getUserInstanceDto();
- exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ final UserInstanceDTO exploratory = getUserInstanceDto();
+ exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE)));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
- when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
- any(EndpointDTO.class))).thenReturn(computationalStopDTO);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
- .thenReturn("someUuid");
- when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+ ComputationalStopDTO computationalStopDTO = new ComputationalStopDTO();
+ when(requestBuilder.newComputationalStop(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+ any(EndpointDTO.class))).thenReturn(computationalStopDTO);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+ .thenReturn("someUuid");
+ when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
- computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
+ computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
- verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
- verify(provisioningService)
- .post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
- eq(String.class));
- verify(requestId).put(USER, "someUuid");
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
- provisioningService, requestId);
- }
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStopping, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+ verify(requestBuilder).newComputationalStop(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+ verify(provisioningService)
+ .post(eq(endpointDTO().getUrl() + "computational/stop/spark"), eq(TOKEN), refEq(computationalStopDTO),
+ eq(String.class));
+ verify(requestId).put(USER, "someUuid");
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+ provisioningService, requestId);
+ }
@Test
public void stopSparkClusterWhenDataengineTypeIsAnother() {
- final UserInstanceDTO exploratory = getUserInstanceDto();
- exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
- expectedException.expect(IllegalStateException.class);
- expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
+ final UserInstanceDTO exploratory = getUserInstanceDto();
+ exploratory.setResources(singletonList(getUserComputationalResource(RUNNING, DOCKER_DLAB_DATAENGINE_SERVICE)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+ expectedException.expect(IllegalStateException.class);
+ expectedException.expectMessage("There is no running dataengine compName for exploratory expName");
- computationalService.stopSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME);
- }
+ computationalService.stopSparkCluster(userInfo, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ }
@Test
public void startSparkCluster() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- final UserInstanceDTO exploratory = getUserInstanceDto();
- exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
- when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
- .thenReturn(mock(UpdateResult.class));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ final UserInstanceDTO exploratory = getUserInstanceDto();
+ exploratory.setResources(singletonList(getUserComputationalResource(STOPPED, DOCKER_DLAB_DATAENGINE)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(exploratory);
+ when(computationalDAO.updateComputationalStatus(any(ComputationalStatusDTO.class)))
+ .thenReturn(mock(UpdateResult.class));
- ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
- when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
- any(EndpointDTO.class))).thenReturn(computationalStartDTO);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
- .thenReturn("someUuid");
- when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
+ ComputationalStartDTO computationalStartDTO = new ComputationalStartDTO();
+ when(requestBuilder.newComputationalStart(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
+ any(EndpointDTO.class))).thenReturn(computationalStartDTO);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalBase.class), any()))
+ .thenReturn("someUuid");
+ when(requestId.put(anyString(), anyString())).thenReturn("someUuid");
- computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
+ computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
- verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
- verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
- verify(provisioningService)
- .post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
- refEq(computationalStartDTO),
- eq(String.class));
- verify(requestId).put(USER, "someUuid");
- verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
- provisioningService, requestId);
- }
+ verify(computationalDAO).updateComputationalStatus(refEq(computationalStatusDTOWithStatusStarting, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+ verify(requestBuilder).newComputationalStart(refEq(userInfo), refEq(exploratory), eq(COMP_NAME), refEq(endpointDTO()));
+ verify(provisioningService)
+ .post(eq(endpointDTO().getUrl() + "computational/start/spark"), eq(TOKEN),
+ refEq(computationalStartDTO),
+ eq(String.class));
+ verify(requestId).put(USER, "someUuid");
+ verifyNoMoreInteractions(computationalDAO, exploratoryDAO, requestBuilder,
+ provisioningService, requestId);
+ }
@Test
public void startSparkClusterWhenDataengineStatusIsRunning() {
- final UserInstanceDTO userInstanceDto = getUserInstanceDto();
- userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
- DOCKER_DLAB_DATAENGINE_SERVICE)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+ final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+ userInstanceDto.setResources(singletonList(getUserComputationalResource(RUNNING,
+ DOCKER_DLAB_DATAENGINE_SERVICE)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
- expectedException.expect(IllegalStateException.class);
- expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
+ expectedException.expect(IllegalStateException.class);
+ expectedException.expectMessage("There is no stopped dataengine compName for exploratory expName");
- computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, "");
- }
-
- @Test
- @SuppressWarnings("unchecked")
- public void updateComputationalsReuploadKeyFlag() {
- doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResources(anyString(), any(List.class),
- any(List.class), anyBoolean(), anyVararg());
-
- computationalService.updateComputationalsReuploadKeyFlag(USER, singletonList(RUNNING),
- singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
-
- verify(computationalDAO).updateReuploadKeyFlagForComputationalResources(USER, singletonList
- (RUNNING),
- singletonList(DataEngineType.SPARK_STANDALONE), true, RUNNING);
- verifyNoMoreInteractions(computationalDAO);
- }
+ computationalService.startSparkCluster(userInfo, EXPLORATORY_NAME, COMP_NAME, PROJECT);
+ }
@Test
public void getComputationalResource() {
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString())).thenReturn(ucResource);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString())).thenReturn(ucResource);
- Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
- Optional<UserComputationalResource> actualResource =
- computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
- assertEquals(expectedResource, actualResource);
+ Optional<UserComputationalResource> expectedResource = Optional.of(ucResource);
+ Optional<UserComputationalResource> actualResource =
+ computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ assertEquals(expectedResource, actualResource);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
- verifyNoMoreInteractions(computationalDAO);
- }
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ verifyNoMoreInteractions(computationalDAO);
+ }
@Test
public void getComputationalResourceWithException() {
- doThrow(new DlabException("Computational resource not found"))
- .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+ doThrow(new DlabException("Computational resource not found"))
+ .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
- Optional<UserComputationalResource> expectedResource = Optional.empty();
- Optional<UserComputationalResource> actualResource =
- computationalService.getComputationalResource(USER, EXPLORATORY_NAME, COMP_NAME);
- assertEquals(expectedResource, actualResource);
+ Optional<UserComputationalResource> expectedResource = Optional.empty();
+ Optional<UserComputationalResource> actualResource =
+ computationalService.getComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ assertEquals(expectedResource, actualResource);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMP_NAME);
- verifyNoMoreInteractions(computationalDAO);
- }
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ verifyNoMoreInteractions(computationalDAO);
+ }
@Test
public void testUpdateSparkClusterConfig() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
- final UserInstanceDTO userInstanceDto = getUserInstanceDto();
- final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
- userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
- when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), anyListOf(ClusterConfig.class), any(EndpointDTO.class)))
- .thenReturn(clusterConfigDTO);
- when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
- .thenReturn("someUuid");
- computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
- config);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ final ComputationalClusterConfigDTO clusterConfigDTO = new ComputationalClusterConfigDTO();
+ final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+ final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+ userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(RUNNING, COMP_NAME)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+ when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), anyListOf(ClusterConfig.class), any(EndpointDTO.class)))
+ .thenReturn(clusterConfigDTO);
+ when(provisioningService.post(anyString(), anyString(), any(ComputationalClusterConfigDTO.class), any()))
+ .thenReturn("someUuid");
+ computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+ COMP_NAME, config);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
- verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
- refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
- eq(Collections.singletonList(new ClusterConfig())), eq(endpointDTO()));
- verify(requestId).put(USER, "someUuid");
- verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
- .withConfig(config)
- .withUser(USER)
- .withExploratoryName(EXPLORATORY_NAME)
- .withComputationalName(COMP_NAME)
- .withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
- eq(getUserInfo().getAccessToken()),
- refEq(new ComputationalClusterConfigDTO()), eq(String.class));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+ verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(userInstanceDto),
+ refEq(getUserComputationalResource(RUNNING, COMP_NAME)),
+ eq(Collections.singletonList(new ClusterConfig())), eq(endpointDTO()));
+ verify(requestId).put(USER, "someUuid");
+ verify(computationalDAO).updateComputationalFields(refEq(new ComputationalStatusDTO()
+ .withProject(PROJECT)
+ .withConfig(config)
+ .withUser(USER)
+ .withExploratoryName(EXPLORATORY_NAME)
+ .withComputationalName(COMP_NAME)
+ .withStatus(UserInstanceStatus.RECONFIGURING.toString()), "self"));
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "computational/spark/reconfigure"),
+ eq(getUserInfo().getAccessToken()),
+ refEq(new ComputationalClusterConfigDTO()), eq(String.class));
- }
+ }
@Test
public void testUpdateSparkClusterConfigWhenClusterIsNotRunning() {
- final UserInstanceDTO userInstanceDto = getUserInstanceDto();
- final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
- userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
- try {
- computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME,
- config);
- } catch (ResourceNotFoundException e) {
- assertEquals("Running computational resource with name compName for exploratory expName not found",
- e.getMessage());
- }
+ final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+ final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+ userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+ try {
+ computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+ COMP_NAME, config);
+ } catch (ResourceNotFoundException e) {
+ assertEquals("Running computational resource with name compName for exploratory expName not found",
+ e.getMessage());
+ }
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
- verifyNoMoreInteractions(exploratoryDAO);
- verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+ verifyNoMoreInteractions(exploratoryDAO);
+ verifyZeroInteractions(provisioningService, requestBuilder, requestId);
- }
+ }
@Test
public void testUpdateSparkClusterConfigWhenClusterIsNotFound() {
- final UserInstanceDTO userInstanceDto = getUserInstanceDto();
- final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
- userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
- try {
- computationalService.updateSparkClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME + "X",
- config);
- } catch (ResourceNotFoundException e) {
- assertEquals("Running computational resource with name compNameX for exploratory expName not found",
- e.getMessage());
- }
+ final UserInstanceDTO userInstanceDto = getUserInstanceDto();
+ final List<ClusterConfig> config = Collections.singletonList(new ClusterConfig());
+ userInstanceDto.setResources(Collections.singletonList(getUserComputationalResource(STOPPED, COMP_NAME)));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(userInstanceDto);
+ try {
+ computationalService.updateSparkClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+ COMP_NAME + "X", config);
+ } catch (ResourceNotFoundException e) {
+ assertEquals("Running computational resource with name compNameX for exploratory expName not found",
+ e.getMessage());
+ }
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, true);
- verifyNoMoreInteractions(exploratoryDAO);
- verifyZeroInteractions(provisioningService, requestBuilder, requestId);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, true);
+ verifyNoMoreInteractions(exploratoryDAO);
+ verifyZeroInteractions(provisioningService, requestBuilder, requestId);
- }
+ }
@Test
public void testGetClusterConfig() {
- when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+ when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
- final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(),
- EXPLORATORY_NAME, COMP_NAME);
- final ClusterConfig config = clusterConfig.get(0);
+ final List<ClusterConfig> clusterConfig = computationalService.getClusterConfig(getUserInfo(), PROJECT,
+ EXPLORATORY_NAME, COMP_NAME);
+ final ClusterConfig config = clusterConfig.get(0);
- assertEquals(1, clusterConfig.size());
- assertEquals("test", config.getClassification());
- assertNull(config.getConfigurations());
- assertNull(config.getProperties());
- }
+ assertEquals(1, clusterConfig.size());
+ assertEquals("test", config.getClassification());
+ assertNull(config.getConfigurations());
+ assertNull(config.getProperties());
+ }
@Test
public void testGetClusterConfigWithException() {
- when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException(
- "Exception"));
+ when(computationalDAO.getClusterConfig(anyString(), anyString(), anyString(), anyString())).thenThrow(new RuntimeException(
+ "Exception"));
- expectedException.expectMessage("Exception");
- expectedException.expect(RuntimeException.class);
- computationalService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME, COMP_NAME);
- }
+ expectedException.expectMessage("Exception");
+ expectedException.expect(RuntimeException.class);
+ computationalService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, COMP_NAME);
+ }
private ClusterConfig getClusterConfig() {
final ClusterConfig config = new ClusterConfig();
@@ -705,26 +711,29 @@
private UserInstanceDTO getUserInstanceDto() {
return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
- .withExploratoryId("explId")
+ .withExploratoryId("explId")
+ .withProject(PROJECT)
.withTags(Collections.emptyMap());
}
private List<ComputationalCreateFormDTO> getFormList() {
- SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
- sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
- sparkClusterForm.setName(COMP_NAME);
- sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
- sparkClusterForm.setImage("dataengine");
- ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
- desClusterForm.setNotebookName(EXPLORATORY_NAME);
- desClusterForm.setName(COMP_NAME);
+ SparkStandaloneClusterCreateForm sparkClusterForm = new SparkStandaloneClusterCreateForm();
+ sparkClusterForm.setNotebookName(EXPLORATORY_NAME);
+ sparkClusterForm.setName(COMP_NAME);
+ sparkClusterForm.setProject(PROJECT);
+ sparkClusterForm.setDataEngineInstanceCount(String.valueOf(2));
+ sparkClusterForm.setImage("dataengine");
+ ComputationalCreateFormDTO desClusterForm = new ComputationalCreateFormDTO();
+ desClusterForm.setNotebookName(EXPLORATORY_NAME);
+ desClusterForm.setName(COMP_NAME);
- return Arrays.asList(sparkClusterForm, desClusterForm);
- }
+ return Arrays.asList(sparkClusterForm, desClusterForm);
+ }
private ComputationalStatusDTO getComputationalStatusDTOWithStatus(String status) {
return new ComputationalStatusDTO()
- .withUser(USER)
+ .withUser(USER)
+ .withProject(PROJECT)
.withExploratoryName(EXPLORATORY_NAME)
.withComputationalName(COMP_NAME)
.withStatus(UserInstanceStatus.of(status));
@@ -762,8 +771,8 @@
}
private ProjectDTO getProjectDTO() {
- return new ProjectDTO("project", Collections.emptySet(), "", "", null,
- singletonList(new ProjectEndpointDTO("endpoint", UserInstanceStatus.RUNNING,
- new EdgeInfo())), true);
- }
+ return new ProjectDTO(PROJECT, Collections.emptySet(), "", "", null,
+ singletonList(new ProjectEndpointDTO("endpoint", UserInstanceStatus.RUNNING,
+ new EdgeInfo())), true);
+ }
}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
index 5396ca8..dca6e0f 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/EnvironmentServiceImplTest.java
@@ -43,11 +43,27 @@
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.anyVararg;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class EnvironmentServiceImplTest {
@@ -57,8 +73,6 @@
private static final String EXPLORATORY_NAME_2 = "expName2";
private static final String TOKEN = "token";
private static final String UUID = "213-12312-321";
- private static final String RUNNING_STATE = "running";
- private static final String STOPPED_STATE = "stopped";
private static final String PROJECT_NAME = "projectName";
private static final String ENDPOINT_NAME = "endpointName";
private static final String ADMIN = "admin";
@@ -139,13 +153,13 @@
public void stopEnvironment() {
final UserInfo userInfo = getUserInfo();
when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
- environmentService.stopEnvironment(userInfo, USER);
+ environmentService.stopEnvironment(userInfo, USER, PROJECT_NAME);
verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_2));
verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
UserInstanceStatus.CREATING,
@@ -160,20 +174,20 @@
.thenReturn(getUserInstances());
expectedException.expect(ResourceConflictException.class);
- environmentService.stopEnvironment(getUserInfo(), USER);
+ environmentService.stopEnvironment(getUserInfo(), USER, PROJECT_NAME);
}
@Test
public void stopEnvironmentWithoutEdge() {
final UserInfo userInfo = getUserInfo();
when(exploratoryDAO.fetchRunningExploratoryFields(anyString())).thenReturn(getUserInstances());
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
- environmentService.stopEnvironment(userInfo, USER);
+ environmentService.stopEnvironment(userInfo, USER, PROJECT_NAME);
verify(exploratoryDAO).fetchRunningExploratoryFields(USER);
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_2));
verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, Arrays.asList(UserInstanceStatus.CREATING,
UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE),
UserInstanceStatus.CREATING, UserInstanceStatus.STARTING, UserInstanceStatus.CREATING_IMAGE);
@@ -186,15 +200,15 @@
final ProjectDTO projectDTO = getProjectDTO();
when(exploratoryDAO.fetchRunningExploratoryFieldsForProject(anyString())).thenReturn(getUserInstances());
when(securityService.getServiceAccountInfo(anyString())).thenReturn(userInfo);
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
when(projectService.get(anyString())).thenReturn(projectDTO);
doNothing().when(projectService).stop(any(UserInfo.class), anyString(), anyString());
environmentService.stopProjectEnvironment(PROJECT_NAME);
verify(exploratoryDAO).fetchRunningExploratoryFieldsForProject(PROJECT_NAME);
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_2));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_2));
verify(securityService, times(2)).getServiceAccountInfo(USER);
verify(securityService).getServiceAccountInfo(ADMIN);
verify(projectService).get(eq(PROJECT_NAME));
@@ -208,33 +222,33 @@
@Test
public void stopExploratory() {
final UserInfo userInfo = getUserInfo();
- when(exploratoryService.stop(any(UserInfo.class), anyString())).thenReturn(UUID);
+ when(exploratoryService.stop(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
- environmentService.stopExploratory(new UserInfo(USER, TOKEN), USER, EXPLORATORY_NAME_1);
+ environmentService.stopExploratory(new UserInfo(USER, TOKEN), USER, PROJECT_NAME, EXPLORATORY_NAME_1);
- verify(exploratoryService).stop(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+ verify(exploratoryService).stop(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
verifyNoMoreInteractions(securityService, exploratoryService);
}
@Test
public void stopComputational() {
final UserInfo userInfo = getUserInfo();
- doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString());
+ doNothing().when(computationalService).stopSparkCluster(any(UserInfo.class), anyString(), anyString(), anyString());
- environmentService.stopComputational(userInfo, USER, EXPLORATORY_NAME_1, "compName");
+ environmentService.stopComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
- verify(computationalService).stopSparkCluster(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+ verify(computationalService).stopSparkCluster(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
verifyNoMoreInteractions(securityService, computationalService);
}
@Test
public void terminateExploratory() {
final UserInfo userInfo = getUserInfo();
- when(exploratoryService.terminate(any(UserInfo.class), anyString())).thenReturn(UUID);
+ when(exploratoryService.terminate(any(UserInfo.class), anyString(), anyString())).thenReturn(UUID);
- environmentService.terminateExploratory(userInfo, USER, EXPLORATORY_NAME_1);
+ environmentService.terminateExploratory(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1);
- verify(exploratoryService).terminate(refEq(userInfo), eq(EXPLORATORY_NAME_1));
+ verify(exploratoryService).terminate(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1));
verifyNoMoreInteractions(securityService, exploratoryService);
}
@@ -242,12 +256,12 @@
public void terminateComputational() {
final UserInfo userInfo = getUserInfo();
doNothing().when(computationalService)
- .terminateComputational(any(UserInfo.class), anyString(), anyString());
+ .terminateComputational(any(UserInfo.class), anyString(), anyString(), anyString());
- environmentService.terminateComputational(userInfo, USER, EXPLORATORY_NAME_1, "compName");
+ environmentService.terminateComputational(userInfo, USER, PROJECT_NAME, EXPLORATORY_NAME_1, "compName");
verify(computationalService)
- .terminateComputational(refEq(userInfo), eq(EXPLORATORY_NAME_1), eq("compName"));
+ .terminateComputational(refEq(userInfo), eq(PROJECT_NAME), eq(EXPLORATORY_NAME_1), eq("compName"));
verifyNoMoreInteractions(securityService, computationalService);
}
@@ -257,8 +271,8 @@
private List<UserInstanceDTO> getUserInstances() {
return Arrays.asList(
- new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject("prj"),
- new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject("prj"));
+ new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_1).withUser(USER).withProject(PROJECT_NAME),
+ new UserInstanceDTO().withExploratoryName(EXPLORATORY_NAME_2).withUser(USER).withProject(PROJECT_NAME));
}
private ProjectDTO getProjectDTO() {
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
index e7d718d..5d21167 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ExploratoryServiceImplTest.java
@@ -66,7 +66,6 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyListOf;
import static org.mockito.Mockito.anyMapOf;
import static org.mockito.Mockito.anyString;
@@ -85,26 +84,27 @@
@RunWith(MockitoJUnitRunner.class)
public class ExploratoryServiceImplTest {
- private final String USER = "test";
- private final String TOKEN = "token";
- private final String EXPLORATORY_NAME = "expName";
- private final String UUID = "1234-56789765-4321";
- private static final String ENDPOINT_NAME = "endpointName";
+ private final String USER = "test";
+ private final String TOKEN = "token";
+ private final String PROJECT = "project";
+ private final String EXPLORATORY_NAME = "expName";
+ private final String UUID = "1234-56789765-4321";
+ private static final String ENDPOINT_NAME = "endpointName";
- private UserInfo userInfo;
- private UserInstanceDTO userInstance;
- private StatusEnvBaseDTO statusEnvBaseDTO;
+ private UserInfo userInfo;
+ private UserInstanceDTO userInstance;
+ private StatusEnvBaseDTO statusEnvBaseDTO;
- @Mock
- private ProjectService projectService;
- @Mock
- private ExploratoryDAO exploratoryDAO;
- @Mock
- private ComputationalDAO computationalDAO;
- @Mock
- private GitCredsDAO gitCredsDAO;
- @Mock
+ @Mock
+ private ProjectService projectService;
+ @Mock
+ private ExploratoryDAO exploratoryDAO;
+ @Mock
+ private ComputationalDAO computationalDAO;
+ @Mock
+ private GitCredsDAO gitCredsDAO;
+ @Mock
private RESTService provisioningService;
@Mock
private RequestBuilder requestBuilder;
@@ -129,162 +129,162 @@
@Test
public void start() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
- when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
+ ExploratoryGitCredsDTO egcDtoMock = mock(ExploratoryGitCredsDTO.class);
+ when(gitCredsDAO.findGitCreds(anyString())).thenReturn(egcDtoMock);
- ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
- egcuDto.withExploratoryName(EXPLORATORY_NAME);
- when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
- any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
+ ExploratoryActionDTO egcuDto = new ExploratoryGitCredsUpdateDTO();
+ egcuDto.withExploratoryName(EXPLORATORY_NAME);
+ when(requestBuilder.newExploratoryStart(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+ any(ExploratoryGitCredsDTO.class))).thenReturn(egcuDto);
- String exploratoryStart = "exploratory/start";
- when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
- .thenReturn(UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ String exploratoryStart = "exploratory/start";
+ when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any()))
+ .thenReturn(UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
- assertNotNull(uuid);
- assertEquals(UUID, uuid);
+ String uuid = exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
+ assertNotNull(uuid);
+ assertEquals(UUID, uuid);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
- verify(requestId).put(USER, UUID);
- verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
- }
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStart, TOKEN, egcuDto, String.class);
+ verify(requestId).put(USER, UUID);
+ verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+ }
@Test
public void startWhenMethodFetchExploratoryFieldsThrowsException() {
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
- try {
- exploratoryService.start(userInfo, EXPLORATORY_NAME, "project");
- } catch (DlabException e) {
- assertEquals("Could not start exploratory environment expName: Exploratory for user with " +
- "name not found", e.getMessage());
- }
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+ try {
+ exploratoryService.start(userInfo, EXPLORATORY_NAME, PROJECT);
+ } catch (DlabException e) {
+ assertEquals("Could not start exploratory environment expName: Exploratory for user with " +
+ "name not found", e.getMessage());
+ }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("starting");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void stop() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
- eaDto.withExploratoryName(EXPLORATORY_NAME);
- when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
- .thenReturn(eaDto);
+ ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+ eaDto.withExploratoryName(EXPLORATORY_NAME);
+ when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+ .thenReturn(eaDto);
- String exploratoryStop = "exploratory/stop";
- when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
- (UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ String exploratoryStop = "exploratory/stop";
+ when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
+ (UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- String uuid = exploratoryService.stop(userInfo, EXPLORATORY_NAME);
- assertNotNull(uuid);
- assertEquals(UUID, uuid);
+ String uuid = exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+ assertNotNull(uuid);
+ assertEquals(UUID, uuid);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
- verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), EXPLORATORY_NAME,
- UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
- verify(requestId).put(USER, UUID);
- verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
- }
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(provisioningService).post(endpointDTO().getUrl() + exploratoryStop, TOKEN, eaDto, String.class);
+ verify(computationalDAO).updateComputationalStatusesForExploratory(userInfo.getName(), PROJECT,
+ EXPLORATORY_NAME, UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING,
+ UserInstanceStatus.FAILED, UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
+ verify(requestId).put(USER, UUID);
+ verifyNoMoreInteractions(exploratoryDAO, provisioningService, requestId);
+ }
@Test
public void stopWhenMethodFetchExploratoryFieldsThrowsException() {
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
- try {
- exploratoryService.stop(userInfo, EXPLORATORY_NAME);
- } catch (DlabException e) {
- assertEquals("Could not stop exploratory environment expName: Exploratory for user with " +
- "name not found", e.getMessage());
- }
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+ try {
+ exploratoryService.stop(userInfo, PROJECT, EXPLORATORY_NAME);
+ } catch (DlabException e) {
+ assertEquals("Could not stop exploratory environment expName: Exploratory for user with " +
+ "name not found", e.getMessage());
+ }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void terminate() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class))).thenReturn(1);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
- eaDto.withExploratoryName(EXPLORATORY_NAME);
- when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
- .thenReturn(eaDto);
+ ExploratoryActionDTO eaDto = new ExploratoryActionDTO();
+ eaDto.withExploratoryName(EXPLORATORY_NAME);
+ when(requestBuilder.newExploratoryStop(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class)))
+ .thenReturn(eaDto);
- String exploratoryTerminate = "exploratory/terminate";
- when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
- (UUID);
- when(requestId.put(anyString(), anyString())).thenReturn(UUID);
+ String exploratoryTerminate = "exploratory/terminate";
+ when(provisioningService.post(anyString(), anyString(), any(ExploratoryActionDTO.class), any())).thenReturn
+ (UUID);
+ when(requestId.put(anyString(), anyString())).thenReturn(UUID);
- String uuid = exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
- assertNotNull(uuid);
- assertEquals(UUID, uuid);
+ String uuid = exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
+ assertNotNull(uuid);
+ assertEquals(UUID, uuid);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
- .TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
- UserInstanceStatus.FAILED);
- verify(requestBuilder).newExploratoryStop(userInfo, userInstance, endpointDTO());
- verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
- verify(requestId).put(USER, UUID);
- verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
- }
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT, EXPLORATORY_NAME,
+ UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED,
+ UserInstanceStatus.FAILED);
+ verify(requestBuilder).newExploratoryStop(userInfo, userInstance, endpointDTO());
+ verify(provisioningService).post(endpointDTO().getUrl() + exploratoryTerminate, TOKEN, eaDto, String.class);
+ verify(requestId).put(USER, UUID);
+ verifyNoMoreInteractions(exploratoryDAO, computationalDAO, requestBuilder, provisioningService, requestId);
+ }
@Test
public void terminateWhenMethodFetchExploratoryFieldsThrowsException() {
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
- try {
- exploratoryService.terminate(userInfo, EXPLORATORY_NAME);
- } catch (DlabException e) {
- assertEquals("Could not terminate exploratory environment expName: Exploratory for user " +
- "with name not found", e.getMessage());
- }
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
+ try {
+ exploratoryService.terminate(userInfo, PROJECT, EXPLORATORY_NAME);
+ } catch (DlabException e) {
+ assertEquals("Could not terminate exploratory environment expName: Exploratory for user " +
+ "with name not found", e.getMessage());
+ }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("failed");
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void create() {
@@ -389,202 +389,116 @@
}
@Test
- public void updateExploratoryStatusesWithRunningStatus() {
- when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
- .thenReturn(singletonList(userInstance));
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
- .thenReturn(mock(UpdateResult.class));
-
- exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.RUNNING);
-
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("running");
-
- verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verifyNoMoreInteractions(exploratoryDAO);
- }
-
- @Test
- public void updateExploratoryStatusesWithStoppingStatus() {
- userInstance.setStatus("stopping");
- when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
- .thenReturn(singletonList(userInstance));
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
- .thenReturn(mock(UpdateResult.class));
- doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
- any(UserInstanceStatus.class), any(UserInstanceStatus.class));
-
- exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.STOPPING);
-
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("stopping");
-
- verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME,
- UserInstanceStatus.STOPPING, UserInstanceStatus.TERMINATING, UserInstanceStatus.FAILED,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.STOPPED);
- verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
- }
-
- @Test
- public void updateExploratoryStatusesWithTerminatingStatus() {
- userInstance.setStatus("terminating");
- when(exploratoryDAO.fetchUserExploratoriesWhereStatusNotIn(anyString(), anyVararg()))
- .thenReturn(singletonList(userInstance));
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class)))
- .thenReturn(mock(UpdateResult.class));
- when(computationalDAO.updateComputationalStatusesForExploratory(any(StatusEnvBaseDTO.class)))
- .thenReturn(10);
-
- exploratoryService.updateExploratoryStatuses(USER, UserInstanceStatus.TERMINATING);
-
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminating");
-
- verify(exploratoryDAO).fetchUserExploratoriesWhereStatusNotIn(USER,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME, UserInstanceStatus
- .TERMINATING, UserInstanceStatus.TERMINATING, UserInstanceStatus.TERMINATED, UserInstanceStatus
- .FAILED);
- verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
- }
-
- @Test
public void updateProjectExploratoryStatuses() {
- when(exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(anyString(), anyString(), anyVararg()))
- .thenReturn(singletonList(userInstance));
- when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
- doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
- any(UserInstanceStatus.class), any(UserInstanceStatus.class), anyVararg());
+ when(exploratoryDAO.fetchProjectExploratoriesWhereStatusNotIn(anyString(), anyString(), anyVararg()))
+ .thenReturn(singletonList(userInstance));
+ when(exploratoryDAO.updateExploratoryStatus(any(StatusEnvBaseDTO.class))).thenReturn(mock(UpdateResult.class));
+ doNothing().when(computationalDAO).updateComputationalStatusesForExploratory(anyString(), anyString(),
+ anyString(), any(UserInstanceStatus.class), any(UserInstanceStatus.class), anyVararg());
- exploratoryService.updateProjectExploratoryStatuses("project", "endpoint",
- UserInstanceStatus.TERMINATED);
- statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminated");
+ exploratoryService.updateProjectExploratoryStatuses("project", "endpoint",
+ UserInstanceStatus.TERMINATED);
+ statusEnvBaseDTO = getStatusEnvBaseDTOWithStatus("terminated");
- verify(exploratoryDAO).fetchProjectExploratoriesWhereStatusNotIn("project", "endpoint",
- UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
- verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
- verify(computationalDAO).updateComputationalStatusesForExploratory(USER, EXPLORATORY_NAME,
- UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED,
- UserInstanceStatus.FAILED);
+ verify(exploratoryDAO).fetchProjectExploratoriesWhereStatusNotIn("project", "endpoint",
+ UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
+ verify(exploratoryDAO).updateExploratoryStatus(refEq(statusEnvBaseDTO, "self"));
+ verify(computationalDAO).updateComputationalStatusesForExploratory(USER, PROJECT,
+ EXPLORATORY_NAME, UserInstanceStatus.TERMINATED, UserInstanceStatus.TERMINATED,
+ UserInstanceStatus.TERMINATED, UserInstanceStatus.FAILED);
- verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
- }
-
- @Test
- public void updateUserExploratoriesReuploadKeyFlag() {
- doNothing().when(exploratoryDAO).updateReuploadKeyForExploratories(anyString(), anyBoolean(),
- any(UserInstanceStatus.class));
-
- exploratoryService.updateExploratoriesReuploadKeyFlag(USER, true, UserInstanceStatus.RUNNING);
-
- verify(exploratoryDAO).updateReuploadKeyForExploratories(USER, true, UserInstanceStatus.RUNNING);
- verifyNoMoreInteractions(exploratoryDAO);
- }
-
- @Test
- public void getInstancesWithStatuses() {
- when(exploratoryDAO.fetchUserExploratoriesWhereStatusIn(anyString(), anyBoolean(), anyVararg()))
- .thenReturn(singletonList(userInstance));
- exploratoryService.getInstancesWithStatuses(USER, UserInstanceStatus.RUNNING, UserInstanceStatus.RUNNING);
-
- verify(exploratoryDAO).fetchUserExploratoriesWhereStatusIn(USER, true, UserInstanceStatus.RUNNING);
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
+ }
@Test
public void getUserInstance() {
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
- Optional<UserInstanceDTO> expectedInstance = Optional.of(userInstance);
- Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
- assertEquals(expectedInstance, actualInstance);
+ Optional<UserInstanceDTO> expectedInstance = Optional.of(userInstance);
+ Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
+ assertEquals(expectedInstance, actualInstance);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void getUserInstanceWithException() {
- doThrow(new ResourceNotFoundException("Exploratory for user not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+ doThrow(new ResourceNotFoundException("Exploratory for user not found"))
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
- Optional<UserInstanceDTO> expectedInstance = Optional.empty();
- Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, EXPLORATORY_NAME);
- assertEquals(expectedInstance, actualInstance);
+ Optional<UserInstanceDTO> expectedInstance = Optional.empty();
+ Optional<UserInstanceDTO> actualInstance = exploratoryService.getUserInstance(USER, PROJECT, EXPLORATORY_NAME);
+ assertEquals(expectedInstance, actualInstance);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void testUpdateExploratoryClusterConfig() {
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
- anyListOf(ClusterConfig.class), any(EndpointDTO.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
- when(provisioningService.post(anyString(), anyString(), any(ExploratoryReconfigureSparkClusterActionDTO.class)
- , any())).thenReturn(UUID);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(requestBuilder.newClusterConfigUpdate(any(UserInfo.class), any(UserInstanceDTO.class),
+ anyListOf(ClusterConfig.class), any(EndpointDTO.class))).thenReturn(new ExploratoryReconfigureSparkClusterActionDTO());
+ when(provisioningService.post(anyString(), anyString(), any(ExploratoryReconfigureSparkClusterActionDTO.class)
+ , any())).thenReturn(UUID);
- exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME, singletonList(new ClusterConfig()));
+ exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME, singletonList(new ClusterConfig()));
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(getUserInstanceDto()),
- refEq(singletonList(new ClusterConfig())), refEq(endpointDTO()));
- verify(requestId).put(USER, UUID);
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "exploratory/reconfigure_spark"), eq(TOKEN),
- refEq(new ExploratoryReconfigureSparkClusterActionDTO(), "self"), eq(String.class));
- verify(exploratoryDAO).updateExploratoryFields(refEq(new ExploratoryStatusDTO()
- .withUser(USER)
- .withConfig(singletonList(new ClusterConfig()))
- .withStatus(UserInstanceStatus.RECONFIGURING.toString())
- .withExploratoryName(EXPLORATORY_NAME), "self"));
- verifyNoMoreInteractions(requestBuilder, requestId, exploratoryDAO, provisioningService);
- }
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newClusterConfigUpdate(refEq(getUserInfo()), refEq(getUserInstanceDto()),
+ refEq(singletonList(new ClusterConfig())), refEq(endpointDTO()));
+ verify(requestId).put(USER, UUID);
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "exploratory/reconfigure_spark"), eq(TOKEN),
+ refEq(new ExploratoryReconfigureSparkClusterActionDTO(), "self"), eq(String.class));
+ verify(exploratoryDAO).updateExploratoryFields(refEq(new ExploratoryStatusDTO()
+ .withUser(USER)
+ .withProject(PROJECT)
+ .withConfig(singletonList(new ClusterConfig()))
+ .withStatus(UserInstanceStatus.RECONFIGURING.toString())
+ .withExploratoryName(EXPLORATORY_NAME), "self"));
+ verifyNoMoreInteractions(requestBuilder, requestId, exploratoryDAO, provisioningService);
+ }
@Test
public void testUpdateExploratoryClusterConfigWhenNotRunning() {
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenThrow(new ResourceNotFoundException("EXCEPTION"));
- try {
+ try {
- exploratoryService.updateClusterConfig(getUserInfo(), EXPLORATORY_NAME,
- singletonList(new ClusterConfig()));
- } catch (ResourceNotFoundException e) {
- assertEquals("EXCEPTION", e.getMessage());
- }
+ exploratoryService.updateClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME,
+ singletonList(new ClusterConfig()));
+ } catch (ResourceNotFoundException e) {
+ assertEquals("EXCEPTION", e.getMessage());
+ }
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(exploratoryDAO);
- verifyZeroInteractions(requestBuilder, requestId, provisioningService);
-
- }
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(exploratoryDAO);
+ verifyZeroInteractions(requestBuilder, requestId, provisioningService);
+ }
@Test
public void testGetClusterConfig() {
+ when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
+ final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
- when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenReturn(Collections.singletonList(getClusterConfig()));
- final List<ClusterConfig> clusterConfig = exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
+ assertEquals(1, clusterConfig.size());
+ assertEquals("classification", clusterConfig.get(0).getClassification());
- assertEquals(1, clusterConfig.size());
- assertEquals("classification", clusterConfig.get(0).getClassification());
-
- verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), EXPLORATORY_NAME);
- verifyNoMoreInteractions(exploratoryDAO);
- }
+ verify(exploratoryDAO).getClusterConfig(getUserInfo().getName(), PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(exploratoryDAO);
+ }
@Test
public void testGetClusterConfigWithException() {
+ when(exploratoryDAO.getClusterConfig(anyString(), anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
- when(exploratoryDAO.getClusterConfig(anyString(), anyString())).thenThrow(new RuntimeException("Exception"));
-
- expectedException.expect(RuntimeException.class);
- expectedException.expectMessage("Exception");
- exploratoryService.getClusterConfig(getUserInfo(), EXPLORATORY_NAME);
- }
+ expectedException.expect(RuntimeException.class);
+ expectedException.expectMessage("Exception");
+ exploratoryService.getClusterConfig(getUserInfo(), PROJECT, EXPLORATORY_NAME);
+ }
private ClusterConfig getClusterConfig() {
final ClusterConfig config = new ClusterConfig();
@@ -597,21 +511,25 @@
}
private UserInstanceDTO getUserInstanceDto() {
- UserComputationalResource compResource = new UserComputationalResource();
- compResource.setImageName("YYYY.dataengine");
- compResource.setComputationalName("compName");
- compResource.setStatus("stopped");
- compResource.setComputationalId("compId");
- return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME).withStatus("running")
- .withResources(singletonList(compResource))
- .withTags(Collections.emptyMap())
- .withProject("project")
- .withEndpoint("test")
- .withCloudProvider(CloudProvider.AWS.toString());
- }
+ UserComputationalResource compResource = new UserComputationalResource();
+ compResource.setImageName("YYYY.dataengine");
+ compResource.setComputationalName("compName");
+ compResource.setStatus("stopped");
+ compResource.setComputationalId("compId");
+ return new UserInstanceDTO()
+ .withUser(USER)
+ .withExploratoryName(EXPLORATORY_NAME)
+ .withStatus("running")
+ .withResources(singletonList(compResource))
+ .withTags(Collections.emptyMap())
+ .withProject(PROJECT)
+ .withEndpoint("test")
+ .withCloudProvider(CloudProvider.AWS.toString());
+ }
private StatusEnvBaseDTO getStatusEnvBaseDTOWithStatus(String status) {
return new ExploratoryStatusDTO()
+ .withProject(PROJECT)
.withUser(USER)
.withExploratoryName(EXPLORATORY_NAME)
.withStatus(status);
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
index 7509971..b600879 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ImageExploratoryServiceImplTest.java
@@ -24,8 +24,10 @@
import com.epam.dlab.backendapi.dao.ExploratoryLibDAO;
import com.epam.dlab.backendapi.dao.ImageExploratoryDao;
import com.epam.dlab.backendapi.domain.EndpointDTO;
+import com.epam.dlab.backendapi.domain.ProjectDTO;
import com.epam.dlab.backendapi.resources.dto.ImageInfoRecord;
import com.epam.dlab.backendapi.service.EndpointService;
+import com.epam.dlab.backendapi.service.ProjectService;
import com.epam.dlab.backendapi.util.RequestBuilder;
import com.epam.dlab.cloud.CloudProvider;
import com.epam.dlab.dto.UserInstanceDTO;
@@ -56,7 +58,16 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.anyVararg;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class ImageExploratoryServiceImplTest {
@@ -82,6 +93,8 @@
private RequestBuilder requestBuilder;
@Mock
private EndpointService endpointService;
+ @Mock
+ private ProjectService projectService;
@InjectMocks
private ImageExploratoryServiceImpl imageExploratoryService;
@@ -98,95 +111,99 @@
@Test
public void createImage() {
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(projectService.get(anyString())).thenReturn(getProjectDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
- when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+ when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
doNothing().when(imageExploratoryDao).save(any(Image.class));
when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
.thenReturn(mock(UpdateResult.class));
ExploratoryImageDTO eiDto = new ExploratoryImageDTO();
when(endpointService.get(anyString())).thenReturn(endpointDTO());
when(requestBuilder.newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString(),
- any(EndpointDTO.class))).thenReturn(eiDto);
+ any(EndpointDTO.class), any(ProjectDTO.class))).thenReturn(eiDto);
String expectedUuid = "someUuid";
when(provisioningService.post(anyString(), anyString(), any(ExploratoryImageDTO.class), any()))
.thenReturn(expectedUuid);
String imageName = "someImageName", imageDescription = "someDescription";
- String actualUuid = imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName,
- imageDescription);
+ String actualUuid = imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME,
+ imageName, imageDescription);
assertNotNull(actualUuid);
assertEquals(expectedUuid, actualUuid);
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(projectService).get(PROJECT);
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
verify(imageExploratoryDao).exist(imageName, PROJECT);
verify(imageExploratoryDao).save(any(Image.class));
- verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO());
+ verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO(), getProjectDTO());
verify(endpointService).get(anyString());
verify(provisioningService).post(endpointDTO().getUrl() + "exploratory/image", TOKEN, eiDto, String.class);
- verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService, provisioningService);
+ verifyNoMoreInteractions(projectService, exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService, provisioningService);
}
@Test
public void createImageWhenMethodFetchRunningExploratoryFieldsThrowsException() {
doThrow(new DlabException("Running exploratory instance for user with name not found."))
- .when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString());
+ .when(exploratoryDAO).fetchRunningExploratoryFields(anyString(), anyString(), anyString());
String imageName = "someImageName", imageDescription = "someDescription";
try {
- imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+ imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
} catch (DlabException e) {
assertEquals("Running exploratory instance for user with name not found.", e.getMessage());
}
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
verifyNoMoreInteractions(exploratoryDAO);
}
@Test
public void createImageWhenResourceAlreadyExists() {
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(true);
expectedException.expect(ResourceAlreadyExistException.class);
expectedException.expectMessage("Image with name someImageName is already exist");
String imageName = "someImageName", imageDescription = "someDescription";
- imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+ imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
}
@Test
public void createImageWhenMethodNewExploratoryImageCreateThrowsException() {
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(projectService.get(anyString())).thenReturn(getProjectDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
when(imageExploratoryDao.exist(anyString(), anyString())).thenReturn(false);
- when(libDAO.getLibraries(anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
+ when(libDAO.getLibraries(anyString(), anyString(), anyString())).thenReturn(Collections.singletonList(getLibrary()));
doNothing().when(imageExploratoryDao).save(any(Image.class));
when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
.thenReturn(mock(UpdateResult.class));
doThrow(new DlabException("Cannot create instance of resource class")).when(requestBuilder)
- .newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString(), any(EndpointDTO.class));
+ .newExploratoryImageCreate(any(UserInfo.class), any(UserInstanceDTO.class), anyString(), any(EndpointDTO.class), any(ProjectDTO.class));
when(endpointService.get(anyString())).thenReturn(endpointDTO());
String imageName = "someImageName", imageDescription = "someDescription";
try {
- imageExploratoryService.createImage(userInfo, EXPLORATORY_NAME, imageName, imageDescription);
+ imageExploratoryService.createImage(userInfo, PROJECT, EXPLORATORY_NAME, imageName, imageDescription);
} catch (DlabException e) {
assertEquals("Cannot create instance of resource class", e.getMessage());
}
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(projectService).get(PROJECT);
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
verify(imageExploratoryDao).exist(imageName, PROJECT);
verify(imageExploratoryDao).save(any(Image.class));
- verify(libDAO).getLibraries(USER, EXPLORATORY_NAME);
- verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO());
+ verify(libDAO).getLibraries(USER, PROJECT, EXPLORATORY_NAME);
+ verify(requestBuilder).newExploratoryImageCreate(userInfo, userInstance, imageName, endpointDTO(), getProjectDTO());
verify(endpointService).get(anyString());
- verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService);
+ verifyNoMoreInteractions(projectService, exploratoryDAO, imageExploratoryDao, libDAO, requestBuilder, endpointService);
}
@Test
@@ -194,13 +211,13 @@
when(exploratoryDAO.updateExploratoryStatus(any(ExploratoryStatusDTO.class)))
.thenReturn(mock(UpdateResult.class));
doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
- doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+ doNothing().when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
String notebookIp = "someIp";
imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, notebookIp);
verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
- verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
verify(imageExploratoryDao).updateImageFields(image);
verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
}
@@ -211,7 +228,7 @@
.thenReturn(mock(UpdateResult.class));
doNothing().when(imageExploratoryDao).updateImageFields(any(Image.class));
doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString());
+ .when(exploratoryDAO).updateExploratoryIp(anyString(), anyString(), anyString(), anyString());
String notebookIp = "someIp";
try {
@@ -221,7 +238,7 @@
}
verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
- verify(exploratoryDAO).updateExploratoryIp(USER, notebookIp, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateExploratoryIp(USER, PROJECT, notebookIp, EXPLORATORY_NAME);
verify(imageExploratoryDao).updateImageFields(image);
verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
}
@@ -235,7 +252,7 @@
imageExploratoryService.finishImageCreate(image, EXPLORATORY_NAME, null);
verify(exploratoryDAO).updateExploratoryStatus(any(ExploratoryStatusDTO.class));
- verify(exploratoryDAO, never()).updateExploratoryIp(USER, null, EXPLORATORY_NAME);
+ verify(exploratoryDAO, never()).updateExploratoryIp(USER, PROJECT, null, EXPLORATORY_NAME);
verify(imageExploratoryDao).updateImageFields(image);
verifyNoMoreInteractions(exploratoryDAO, imageExploratoryDao);
}
@@ -301,6 +318,7 @@
.description("someDescription")
.status(ImageStatus.CREATING)
.user(USER)
+ .project(PROJECT)
.libraries(Collections.singletonList(getLibrary()))
.computationalLibraries(Collections.emptyMap())
.dockerImage("someImageName")
@@ -313,8 +331,11 @@
}
private UserInstanceDTO getUserInstanceDto() {
- return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
- .withExploratoryId("explId").withProject(PROJECT);
+ return new UserInstanceDTO()
+ .withUser(USER)
+ .withExploratoryName(EXPLORATORY_NAME)
+ .withExploratoryId("explId")
+ .withProject(PROJECT);
}
private UserInfo getUserInfo() {
@@ -324,4 +345,8 @@
private EndpointDTO endpointDTO() {
return new EndpointDTO("test", "url", "", null, EndpointDTO.EndpointStatus.ACTIVE, CloudProvider.AWS);
}
+
+ private ProjectDTO getProjectDTO() {
+ return ProjectDTO.builder().name(PROJECT).build();
+ }
}
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
index bc3ecbf..3677929 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/LibraryServiceImplTest.java
@@ -56,38 +56,46 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyListOf;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class LibraryServiceImplTest {
- private static final String LIB_NAME = "name";
- private static final String LIB_GROUP = "group";
- private static final String LIB_VERSION = "version";
- private static final String UUID = "id";
- private final String USER = "test";
- private final String EXPLORATORY_NAME = "explName";
- private final String COMPUTATIONAL_NAME = "compName";
+ private static final String LIB_NAME = "name";
+ private static final String LIB_GROUP = "group";
+ private static final String LIB_VERSION = "version";
+ private static final String UUID = "id";
+ private final String USER = "test";
+ private final String EXPLORATORY_NAME = "explName";
+ private final String PROJECT = "projectName";
+ private final String COMPUTATIONAL_NAME = "compName";
- private LibInstallDTO liDto;
- private List<LibInstallDTO> libs;
- private LibInstallFormDTO libInstallFormDTO;
- private LibraryInstallDTO libraryInstallDto;
+ private LibInstallDTO liDto;
+ private List<LibInstallDTO> libs;
+ private LibInstallFormDTO libInstallFormDTO;
+ private LibraryInstallDTO libraryInstallDto;
- @Mock
- private ExploratoryDAO exploratoryDAO;
- @Mock
- private ExploratoryLibDAO libraryDAO;
- @Mock
- private RequestBuilder requestBuilder;
- @Mock
- private RequestId requestId;
- @Mock
- private RESTService provisioningService;
- @Mock
- private EndpointService endpointService;
+ @Mock
+ private ExploratoryDAO exploratoryDAO;
+ @Mock
+ private ExploratoryLibDAO libraryDAO;
+ @Mock
+ private RequestBuilder requestBuilder;
+ @Mock
+ private RequestId requestId;
+ @Mock
+ private RESTService provisioningService;
+ @Mock
+ private EndpointService endpointService;
- @Rule
+ @Rule
public ExpectedException expectedException = ExpectedException.none();
@InjectMocks
@@ -100,243 +108,245 @@
@Test
public void testGetLibs() {
- Document document = new Document();
- when(libraryDAO.findExploratoryLibraries(anyString(), anyString())).thenReturn(document);
+ Document document = new Document();
+ when(libraryDAO.findExploratoryLibraries(anyString(), anyString(), anyString())).thenReturn(document);
- List<Document> expectedList = new ArrayList<>();
- List<Document> actualList = libraryService.getLibs(USER, EXPLORATORY_NAME, "");
- assertNotNull(actualList);
- assertEquals(expectedList, actualList);
+ List<Document> expectedList = new ArrayList<>();
+ List<Document> actualList = libraryService.getLibs(USER, PROJECT, EXPLORATORY_NAME, "");
+ assertNotNull(actualList);
+ assertEquals(expectedList, actualList);
- verify(libraryDAO).findExploratoryLibraries(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(libraryDAO);
- }
+ verify(libraryDAO).findExploratoryLibraries(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(libraryDAO);
+ }
@Test
public void getLibInfo() {
- Document document = new Document();
- when(libraryDAO.findAllLibraries(anyString(), anyString())).thenReturn(document);
+ Document document = new Document();
+ when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString())).thenReturn(document);
- List<LibInfoRecord> expectedList = new ArrayList<>();
- List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
- assertNotNull(actualList);
- assertEquals(expectedList, actualList);
+ List<LibInfoRecord> expectedList = new ArrayList<>();
+ List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
+ assertNotNull(actualList);
+ assertEquals(expectedList, actualList);
- verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(libraryDAO);
- }
+ verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(libraryDAO);
+ }
@Test
public void getLibInfoWhenListsOfExploratoryAndComputationalLibsAreNotEmpty() {
- when(libraryDAO.findAllLibraries(anyString(), anyString()))
- .thenReturn(getDocumentWithExploratoryAndComputationalLibs());
+ when(libraryDAO.findAllLibraries(anyString(), anyString(), anyString()))
+ .thenReturn(getDocumentWithExploratoryAndComputationalLibs());
- List<LibInfoRecord> expectedList = getLibInfoRecordList();
- List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, EXPLORATORY_NAME);
- assertNotNull(actualList);
- assertEquals(expectedList, actualList);
+ List<LibInfoRecord> expectedList = getLibInfoRecordList();
+ List<LibInfoRecord> actualList = libraryService.getLibInfo(USER, PROJECT, EXPLORATORY_NAME);
+ assertNotNull(actualList);
+ assertEquals(expectedList, actualList);
- verify(libraryDAO).findAllLibraries(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(libraryDAO);
- }
+ verify(libraryDAO).findAllLibraries(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(libraryDAO);
+ }
@Test
public void installComputationalLibsWithoutOverride() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class))).thenReturn(libraryInstallDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class))).thenReturn(libraryInstallDTO);
- final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- getLibs(null));
+ final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME, getLibs(null));
- assertEquals(UUID, uuid);
+ assertEquals(UUID, uuid);
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
- verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
- refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"), eq(user.getAccessToken()),
- refEq(libraryInstallDTO), eq(String.class));
- verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
- refEq(libsToInstall.get(0)), eq(false));
- verify(requestId).put(user.getName(), UUID);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+ verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
+ refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"), eq(user.getAccessToken()),
+ refEq(libraryInstallDTO), eq(String.class));
+ verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+ eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(false));
+ verify(requestId).put(user.getName(), UUID);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ }
@Test
public void installComputationalLibsWhenComputationalNotFound() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ libraryInstallDTO.setProject(PROJECT);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
- .thenReturn(libraryInstallDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+ .thenReturn(libraryInstallDTO);
- expectedException.expect(DlabException.class);
- expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
+ expectedException.expect(DlabException.class);
+ expectedException.expectMessage("Computational with name " + COMPUTATIONAL_NAME + "X was not found");
- libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME + "X",
- getLibs(null));
- }
+ libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME + "X", getLibs(null));
+ }
@Test
public void installComputationalLibsWithOverride() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setProject(PROJECT);
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
- .thenReturn(libraryInstallDTO);
- when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+ .thenReturn(libraryInstallDTO);
+ when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
- final String uuid = libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- getLibs(null));
+ final String uuid = libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME, getLibs(null));
- assertEquals(UUID, uuid);
+ assertEquals(UUID, uuid);
- libsToInstall.get(0).setOverride(true);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
- verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME),
- refEq(libsToInstall.get(0)), eq(true));
- verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
- refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
- eq(user.getAccessToken()),
- refEq(libraryInstallDTO), eq(String.class));
- verify(requestId).put(user.getName(), UUID);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ libsToInstall.get(0).setOverride(true);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+ verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
+ eq(COMPUTATIONAL_NAME), refEq(libsToInstall.get(0)), eq(true));
+ verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()),
+ refEq(getUserComputationalResourceWithName(COMPUTATIONAL_NAME)), eq(libsToInstall), refEq(endpointDTO()));
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/computational/lib_install"),
+ eq(user.getAccessToken()),
+ refEq(libraryInstallDTO), eq(String.class));
+ verify(requestId).put(user.getName(), UUID);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ }
@Test
public void installComputationalLibsWhenLibraryIsAlreadyInstalling() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
- any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
- .thenReturn(libraryInstallDTO);
- when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class),
+ any(UserComputationalResource.class), anyListOf(LibInstallDTO.class), any(EndpointDTO.class)))
+ .thenReturn(libraryInstallDTO);
+ when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
- try {
- libraryService.installComputationalLibs(user, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- getLibs(null));
- } catch (DlabException e) {
- assertEquals("Library name is already installing", e.getMessage());
- }
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ try {
+ libraryService.installComputationalLibs(user, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME, getLibs(null));
+ } catch (DlabException e) {
+ assertEquals("Library name is already installing", e.getMessage());
+ }
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, LIB_GROUP, LIB_NAME);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ }
@Test
public void installExploratoryLibsWithoutOverride() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
- anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+ anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
- final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+ final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
- assertEquals(UUID, uuid);
+ assertEquals(UUID, uuid);
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
- verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
- refEq(libraryInstallDTO), eq(String.class));
- verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
- verify(requestId).put(user.getName(), UUID);
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+ verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+ refEq(libraryInstallDTO), eq(String.class));
+ verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(false));
+ verify(requestId).put(user.getName(), UUID);
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ }
@Test
public void installExploratoryLibsWithOverride() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
- anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
- when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+ anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+ when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLED));
- final String uuid = libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
+ final String uuid = libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
- assertEquals(UUID, uuid);
+ assertEquals(UUID, uuid);
- libsToInstall.get(0).setOverride(true);
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
- verify(libraryDAO).addLibrary(eq(USER), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
- verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
- verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
- refEq(libraryInstallDTO), eq(String.class));
- verify(requestId).put(USER, uuid);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ libsToInstall.get(0).setOverride(true);
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+ verify(libraryDAO).addLibrary(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME), refEq(libsToInstall.get(0)), eq(true));
+ verify(requestBuilder).newLibInstall(refEq(user), refEq(getUserInstanceDto()), eq(endpointDTO()), eq(libsToInstall));
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(provisioningService).post(eq(endpointDTO().getUrl() + "library/exploratory/lib_install"), eq(user.getAccessToken()),
+ refEq(libraryInstallDTO), eq(String.class));
+ verify(requestId).put(USER, uuid);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ }
@Test
public void installExploratoryLibsWhenLibIsAlreadyInstalling() {
- final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
- final List<LibInstallDTO> libsToInstall = getLibs("installing");
- libraryInstallDTO.setLibs(libsToInstall);
- final UserInfo user = getUser();
+ final LibraryInstallDTO libraryInstallDTO = new LibraryInstallDTO();
+ final List<LibInstallDTO> libsToInstall = getLibs("installing");
+ libraryInstallDTO.setLibs(libsToInstall);
+ final UserInfo user = getUser();
- when(endpointService.get(anyString())).thenReturn(endpointDTO());
- when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString())).thenReturn(getUserInstanceDto());
- when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
- when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
- anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
- when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
+ when(endpointService.get(anyString())).thenReturn(endpointDTO());
+ when(exploratoryDAO.fetchRunningExploratoryFields(anyString(), anyString(), anyString())).thenReturn(getUserInstanceDto());
+ when(provisioningService.post(anyString(), anyString(), any(LibraryInstallDTO.class), any())).thenReturn(UUID);
+ when(requestBuilder.newLibInstall(any(UserInfo.class), any(UserInstanceDTO.class), any(EndpointDTO.class),
+ anyListOf(LibInstallDTO.class))).thenReturn(libraryInstallDTO);
+ when(libraryDAO.getLibrary(anyString(), anyString(), anyString(), anyString(), anyString())).thenReturn(getLibrary(LibStatus.INSTALLING));
- try {
- libraryService.installExploratoryLibs(user, EXPLORATORY_NAME, getLibs(null));
- } catch (DlabException e) {
- assertEquals("Library name is already installing", e.getMessage());
- }
+ try {
+ libraryService.installExploratoryLibs(user, PROJECT, EXPLORATORY_NAME, getLibs(null));
+ } catch (DlabException e) {
+ assertEquals("Library name is already installing", e.getMessage());
+ }
- verify(libraryDAO).getLibrary(USER, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
- verify(exploratoryDAO).fetchRunningExploratoryFields(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
+ verify(libraryDAO).getLibrary(USER, PROJECT, EXPLORATORY_NAME, LIB_GROUP, LIB_NAME);
+ verify(exploratoryDAO).fetchRunningExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(libraryDAO, requestBuilder, provisioningService, requestId, exploratoryDAO);
- }
+ }
private Library getLibrary(LibStatus status) {
return new Library(LIB_GROUP, LIB_NAME, "1", status, "");
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
index 9b5e887..1aefbbb 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/ReuploadKeyServiceImplTest.java
@@ -25,13 +25,10 @@
import com.epam.dlab.backendapi.domain.RequestId;
import com.epam.dlab.backendapi.service.ExploratoryService;
import com.epam.dlab.backendapi.util.RequestBuilder;
-import com.epam.dlab.dto.UserInstanceDTO;
import com.epam.dlab.dto.UserInstanceStatus;
import com.epam.dlab.dto.reuploadkey.ReuploadKeyCallbackDTO;
-import com.epam.dlab.dto.reuploadkey.ReuploadKeyDTO;
import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatus;
import com.epam.dlab.dto.reuploadkey.ReuploadKeyStatusDTO;
-import com.epam.dlab.exceptions.DlabException;
import com.epam.dlab.model.ResourceData;
import com.epam.dlab.model.ResourceType;
import com.epam.dlab.rest.client.RESTService;
@@ -45,14 +42,16 @@
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import static com.epam.dlab.dto.UserInstanceStatus.REUPLOADING_KEY;
import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class ReuploadKeyServiceImplTest {
@@ -111,16 +110,16 @@
@Test
public void updateResourceDataForExploratoryWhenStatusCompleted() {
ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
- when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+ when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
- doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyBoolean());
+ doNothing().when(exploratoryDAO).updateReuploadKeyForExploratory(anyString(), anyString(), anyString(), anyBoolean());
ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
reuploadKeyService.updateResourceData(dto);
- verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
- verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, EXPLORATORY_NAME, false);
+ verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
+ verify(exploratoryDAO).updateReuploadKeyForExploratory(USER, null, EXPLORATORY_NAME, false);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -128,14 +127,14 @@
@Test
public void updateResourceDataForExploratoryWhenStatusFailed() {
ResourceData resource = new ResourceData(ResourceType.EXPLORATORY, "someId", EXPLORATORY_NAME, null);
- when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(),
+ when(exploratoryDAO.updateStatusForExploratory(anyString(), anyString(), anyString(),
any(UserInstanceStatus.class))).thenReturn(mock(UpdateResult.class));
ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
reuploadKeyService.updateResourceData(dto);
- verify(exploratoryDAO).updateStatusForExploratory(USER, EXPLORATORY_NAME, RUNNING);
+ verify(exploratoryDAO).updateStatusForExploratory(USER, null, EXPLORATORY_NAME, RUNNING);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -144,16 +143,16 @@
public void updateResourceDataForClusterWhenStatusCompleted() {
ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
- any(UserInstanceStatus.class));
+ anyString(), any(UserInstanceStatus.class));
doNothing().when(computationalDAO).updateReuploadKeyFlagForComputationalResource(anyString(), anyString(),
- anyString(), anyBoolean());
+ anyString(), anyString(), anyBoolean());
ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.COMPLETED);
reuploadKeyService.updateResourceData(dto);
- verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
- verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, EXPLORATORY_NAME, "compName",
- false);
+ verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
+ verify(computationalDAO).updateReuploadKeyFlagForComputationalResource(USER, null, EXPLORATORY_NAME,
+ "compName", false);
verifyNoMoreInteractions(computationalDAO);
verifyZeroInteractions(exploratoryDAO);
}
@@ -162,12 +161,12 @@
public void updateResourceDataForClusterWhenStatusFailed() {
ResourceData resource = new ResourceData(ResourceType.COMPUTATIONAL, "someId", EXPLORATORY_NAME, "compName");
doNothing().when(computationalDAO).updateStatusForComputationalResource(anyString(), anyString(), anyString(),
- any(UserInstanceStatus.class));
+ anyString(), any(UserInstanceStatus.class));
ReuploadKeyStatusDTO dto = getReuploadKeyStatusDTO(resource, ReuploadKeyStatus.FAILED);
reuploadKeyService.updateResourceData(dto);
- verify(computationalDAO).updateStatusForComputationalResource(USER, EXPLORATORY_NAME, "compName", RUNNING);
+ verify(computationalDAO).updateStatusForComputationalResource(USER, null, EXPLORATORY_NAME, "compName", RUNNING);
verifyNoMoreInteractions(computationalDAO);
verifyZeroInteractions(exploratoryDAO);
}
@@ -176,10 +175,6 @@
return new UserInfo(USER, TOKEN);
}
- private UserInstanceDTO getUserInstance() {
- return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME);
- }
-
private ReuploadKeyStatusDTO getReuploadKeyStatusDTO(ResourceData resource, ReuploadKeyStatus status) {
return new ReuploadKeyStatusDTO().withReuploadKeyCallbackDto(
new ReuploadKeyCallbackDTO().withResource(resource)).withReuploadKeyStatus(status).withUser(USER);
diff --git a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
index 8a7d8ec..c025651 100644
--- a/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
+++ b/services/self-service/src/test/java/com/epam/dlab/backendapi/service/impl/SchedulerJobServiceImplTest.java
@@ -43,16 +43,40 @@
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
-import java.time.*;
+import java.time.DayOfWeek;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
import java.util.stream.Collectors;
-import static com.epam.dlab.dto.UserInstanceStatus.*;
+import static com.epam.dlab.dto.UserInstanceStatus.RUNNING;
+import static com.epam.dlab.dto.UserInstanceStatus.STARTING;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPED;
+import static com.epam.dlab.dto.UserInstanceStatus.STOPPING;
import static java.util.Collections.singletonList;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.mockito.Matchers.anyVararg;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.refEq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class SchedulerJobServiceImplTest {
@@ -92,84 +116,83 @@
@Test
public void fetchSchedulerJobForUserAndExploratory() {
- when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+ when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
.thenReturn(Optional.of(schedulerJobDTO));
SchedulerJobDTO actualSchedulerJobDto =
- schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+ schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
assertNotNull(actualSchedulerJobDto);
assertEquals(schedulerJobDTO, actualSchedulerJobDto);
- verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+ verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
}
@Test
public void fetchSchedulerJobForUserAndExploratoryWhenNotebookNotExist() {
- when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString())).thenReturn(Optional.empty());
+ when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString())).thenReturn(Optional.empty());
try {
- schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+ schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
} catch (ResourceNotFoundException e) {
assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
}
- verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
+ verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
verifyNoMoreInteractions(schedulerJobDAO);
}
@Test
public void fetchEmptySchedulerJobForUserAndExploratory() {
- when(exploratoryDAO.isExploratoryExist(anyString(), anyString())).thenReturn(true);
- when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString()))
+ when(schedulerJobDAO.fetchSingleSchedulerJobByUserAndExploratory(anyString(), anyString(), anyString()))
.thenReturn(Optional.empty());
try {
- schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, EXPLORATORY_NAME);
+ schedulerJobService.fetchSchedulerJobForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
} catch (ResourceNotFoundException e) {
assertEquals("Scheduler job data not found for user test with exploratory explName", e.getMessage());
}
- verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, EXPLORATORY_NAME);
- verifyNoMoreInteractions(exploratoryDAO, schedulerJobDAO);
+ verify(schedulerJobDAO).fetchSingleSchedulerJobByUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME);
+ verifyNoMoreInteractions(schedulerJobDAO);
}
@Test
public void fetchSchedulerJobForComputationalResource() {
- when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+ when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
.thenReturn(Optional.of(schedulerJobDTO));
SchedulerJobDTO actualSchedulerJobDto = schedulerJobService
- .fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ .fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
assertNotNull(actualSchedulerJobDto);
assertEquals(schedulerJobDTO, actualSchedulerJobDto);
- verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
}
@Test
public void fetchEmptySchedulerJobForComputationalResource() {
- when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString()))
+ when(schedulerJobDAO.fetchSingleSchedulerJobForCluster(anyString(), anyString(), anyString(), anyString()))
.thenReturn(Optional.empty());
try {
- schedulerJobService.fetchSchedulerJobForComputationalResource(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ schedulerJobService.fetchSchedulerJobForComputationalResource(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
} catch (ResourceNotFoundException e) {
assertEquals("Scheduler job data not found for user test with exploratory explName with " +
"computational resource compName", e.getMessage());
}
- verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(schedulerJobDAO).fetchSingleSchedulerJobForCluster(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
verifyNoMoreInteractions(computationalDAO, schedulerJobDAO);
}
@Test
public void updateSchedulerDataForUserAndExploratory() {
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
- verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+ verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -177,13 +200,13 @@
@Test
public void updateSchedulerDataForUserAndExploratoryWhenMethodFetchExploratoryFieldsThrowsException() {
doThrow(new ResourceNotFoundException("Exploratory for user with name not found"))
- .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString());
+ .when(exploratoryDAO).fetchExploratoryFields(anyString(), anyString(), anyString());
try {
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
} catch (ResourceNotFoundException e) {
assertEquals("Exploratory for user with name not found", e.getMessage());
}
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -191,14 +214,14 @@
@Test
public void updateSchedulerDataForUserAndExploratoryWithInapproprietaryStatus() {
userInstance.withStatus("terminated");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
try {
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
} catch (ResourceInappropriateStateException e) {
assertEquals("Can not create/update scheduler for user instance with status: terminated",
e.getMessage());
}
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -208,21 +231,21 @@
schedulerJobDTO.setBeginDate(null);
schedulerJobDTO.setTimeZoneOffset(null);
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
assertNull(schedulerJobDTO.getBeginDate());
assertNull(schedulerJobDTO.getTimeZoneOffset());
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
- verify(computationalDAO).updateSchedulerSyncFlag(USER, EXPLORATORY_NAME, false);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+ verify(computationalDAO).updateSchedulerSyncFlag(USER, PROJECT, EXPLORATORY_NAME, false);
verifyNoMoreInteractions(exploratoryDAO);
verifyZeroInteractions(computationalDAO);
}
@@ -232,25 +255,24 @@
public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParam() {
userInstance.withStatus("running");
schedulerJobDTO.setSyncStartRequired(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
- when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
- anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
+ when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+ any(List.class), anyString(), anyVararg())).thenReturn(singletonList(COMPUTATIONAL_NAME));
when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
- any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+ anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
- verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
- singletonList(DataEngineType.SPARK_STANDALONE),
- EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+ verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+ singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
schedulerJobDTO.setEndTime(null);
schedulerJobDTO.setStopDaysRepeat(Collections.emptyList());
- verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
- COMPUTATIONAL_NAME, schedulerJobDTO);
+ verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+ EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -259,19 +281,18 @@
public void updateSchedulerDataForUserAndExploratoryWithSyncStartRequiredParamButAbsenceClusters() {
userInstance.withStatus("running");
schedulerJobDTO.setSyncStartRequired(true);
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(),
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.updateSchedulerDataForUserAndExploratory(anyString(), anyString(), anyString(),
any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
- when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), any(List.class),
- anyString(), anyVararg())).thenReturn(Collections.emptyList());
+ when(computationalDAO.getComputationalResourcesWhereStatusIn(anyString(), anyString(),
+ any(List.class), anyString(), anyVararg())).thenReturn(Collections.emptyList());
- schedulerJobService.updateExploratorySchedulerData(USER, EXPLORATORY_NAME, schedulerJobDTO);
+ schedulerJobService.updateExploratorySchedulerData(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, EXPLORATORY_NAME, schedulerJobDTO);
- verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER,
- singletonList(DataEngineType.SPARK_STANDALONE),
- EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(exploratoryDAO).updateSchedulerDataForUserAndExploratory(USER, PROJECT, EXPLORATORY_NAME, schedulerJobDTO);
+ verify(computationalDAO).getComputationalResourcesWhereStatusIn(USER, PROJECT,
+ singletonList(DataEngineType.SPARK_STANDALONE), EXPLORATORY_NAME, STARTING, RUNNING, STOPPING, STOPPED);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -279,30 +300,30 @@
@Test
public void updateSchedulerDataForComputationalResource() {
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
.thenReturn(userInstance.getResources().get(0));
when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
- any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+ anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
- schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- schedulerJobDTO);
-
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
+ schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
COMPUTATIONAL_NAME, schedulerJobDTO);
+
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+ EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@Test
public void updateSchedulerDataForComputationalResourceWhenSchedulerIsNull() {
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
.thenReturn(userInstance.getResources().get(0));
when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
- any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+ anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
final SchedulerJobDTO schedulerJobDTO = getSchedulerJobDTO(LocalDate.now(), LocalDate.now().plusDays(1),
Arrays.asList(DayOfWeek.values()), Arrays.asList(DayOfWeek.values()), false,
@@ -310,12 +331,12 @@
LocalTime.now().truncatedTo(ChronoUnit.MINUTES));
schedulerJobDTO.setStartDaysRepeat(null);
schedulerJobDTO.setStopDaysRepeat(null);
- schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- schedulerJobDTO);
+ schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME, schedulerJobDTO);
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(EXPLORATORY_NAME),
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(computationalDAO).updateSchedulerDataForComputationalResource(eq(USER), eq(PROJECT), eq(EXPLORATORY_NAME),
eq(COMPUTATIONAL_NAME), refEq(schedulerJobDTO));
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -323,18 +344,17 @@
@Test
public void updateSchedulerDataForComputationalResourceWhenMethodFetchComputationalFieldsThrowsException() {
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
doThrow(new ResourceNotFoundException("Computational resource for user with name not found"))
- .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString());
+ .when(computationalDAO).fetchComputationalFields(anyString(), anyString(), anyString(), anyString());
try {
- schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
- COMPUTATIONAL_NAME, schedulerJobDTO);
+ schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
} catch (ResourceNotFoundException e) {
assertEquals("Computational resource for user with name not found", e.getMessage());
}
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -342,18 +362,17 @@
public void updateSchedulerDataForComputationalResourceWithInapproprietaryClusterStatus() {
userInstance.setStatus("running");
userInstance.getResources().get(0).setStatus("terminated");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
.thenReturn(userInstance.getResources().get(0));
try {
- schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME,
- COMPUTATIONAL_NAME, schedulerJobDTO);
+ schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
} catch (ResourceInappropriateStateException e) {
assertEquals("Can not create/update scheduler for user instance with status: terminated",
e.getMessage());
}
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -362,25 +381,25 @@
schedulerJobDTO.setBeginDate(null);
schedulerJobDTO.setTimeZoneOffset(null);
userInstance.withStatus("running");
- when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString())).thenReturn(userInstance);
- when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString()))
+ when(exploratoryDAO.fetchExploratoryFields(anyString(), anyString(), anyString())).thenReturn(userInstance);
+ when(computationalDAO.fetchComputationalFields(anyString(), anyString(), anyString(), anyString()))
.thenReturn(userInstance.getResources().get(0));
when(computationalDAO.updateSchedulerDataForComputationalResource(anyString(), anyString(), anyString(),
- any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
+ anyString(), any(SchedulerJobDTO.class))).thenReturn(mock(UpdateResult.class));
assertNull(schedulerJobDTO.getBeginDate());
assertNull(schedulerJobDTO.getTimeZoneOffset());
- schedulerJobService.updateComputationalSchedulerData(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME,
- schedulerJobDTO);
+ schedulerJobService.updateComputationalSchedulerData(USER, PROJECT, EXPLORATORY_NAME,
+ COMPUTATIONAL_NAME, schedulerJobDTO);
assertEquals(LocalDate.now(), schedulerJobDTO.getBeginDate());
assertEquals(OffsetDateTime.now(ZoneId.systemDefault()).getOffset(), schedulerJobDTO.getTimeZoneOffset());
- verify(exploratoryDAO).fetchExploratoryFields(USER, EXPLORATORY_NAME);
- verify(computationalDAO).fetchComputationalFields(USER, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
- verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, EXPLORATORY_NAME,
- COMPUTATIONAL_NAME, schedulerJobDTO);
+ verify(exploratoryDAO).fetchExploratoryFields(USER, PROJECT, EXPLORATORY_NAME);
+ verify(computationalDAO).fetchComputationalFields(USER, PROJECT, EXPLORATORY_NAME, COMPUTATIONAL_NAME);
+ verify(computationalDAO).updateSchedulerDataForComputationalResource(USER, PROJECT,
+ EXPLORATORY_NAME, COMPUTATIONAL_NAME, schedulerJobDTO);
verifyNoMoreInteractions(exploratoryDAO, computationalDAO);
}
@@ -499,7 +518,7 @@
verify(securityService).getServiceAccountInfo(USER);
verify(schedulerJobDAO)
.getComputationalSchedulerDataWithOneOfStatus(RUNNING, DataEngineType.SPARK_STANDALONE, RUNNING);
- verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
+ verify(computationalService).stopSparkCluster(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME),
eq(COMPUTATIONAL_NAME));
verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
}
@@ -597,7 +616,7 @@
verify(securityService).getServiceAccountInfo(USER);
verify(schedulerJobDAO).getExploratorySchedulerWithStatusAndClusterLastActivityLessThan(eq(RUNNING),
any(Date.class));
- verify(exploratoryService).stop(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+ verify(exploratoryService).stop(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService);
}
@@ -711,7 +730,7 @@
)));
when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
- any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+ anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
DataEngineType.SPARK_STANDALONE, true)));
schedulerJobService.startExploratoryByScheduler();
@@ -719,7 +738,7 @@
verify(securityService, times(2)).getServiceAccountInfo(USER);
verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
- verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+ verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
verify(computationalService).startSparkCluster(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
eq(COMPUTATIONAL_NAME), eq(PROJECT));
verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalService,
@@ -738,7 +757,7 @@
)));
when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
- any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+ anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
DataEngineType.CLOUD_SERVICE, true)));
schedulerJobService.startExploratoryByScheduler();
@@ -746,7 +765,7 @@
verify(securityService).getServiceAccountInfo(USER);
verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
- verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+ verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
verifyZeroInteractions(computationalService);
}
@@ -763,7 +782,7 @@
)));
when(securityService.getServiceAccountInfo(anyString())).thenReturn(getUserInfo());
when(computationalDAO.findComputationalResourcesWithStatus(anyString(), anyString(),
- any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
+ anyString(), any(UserInstanceStatus.class))).thenReturn(singletonList(getComputationalResource(
DataEngineType.SPARK_STANDALONE, false)));
schedulerJobService.startExploratoryByScheduler();
@@ -771,7 +790,7 @@
verify(securityService).getServiceAccountInfo(USER);
verify(schedulerJobDAO).getExploratorySchedulerDataWithStatus(STOPPED);
verify(exploratoryService).start(refEq(getUserInfo()), eq(EXPLORATORY_NAME), eq(PROJECT));
- verify(computationalDAO).findComputationalResourcesWithStatus(USER, EXPLORATORY_NAME, STOPPED);
+ verify(computationalDAO).findComputationalResourcesWithStatus(USER, PROJECT, EXPLORATORY_NAME, STOPPED);
verifyNoMoreInteractions(securityService, schedulerJobDAO, exploratoryService, computationalDAO);
verifyZeroInteractions(computationalService);
}
@@ -862,8 +881,8 @@
verify(securityService).getServiceAccountInfo(USER);
verify(schedulerJobDAO)
.getComputationalSchedulerDataWithOneOfStatus(RUNNING, STOPPED, RUNNING);
- verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(EXPLORATORY_NAME),
- eq(COMPUTATIONAL_NAME));
+ verify(computationalService).terminateComputational(refEq(getUserInfo()), eq(PROJECT),
+ eq(EXPLORATORY_NAME), eq(COMPUTATIONAL_NAME));
verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService);
}
@@ -955,7 +974,7 @@
verify(securityService).getUserInfoOffline(USER);
verify(schedulerJobDAO).getExploratorySchedulerDataWithOneOfStatus(RUNNING, STOPPED);
- verify(exploratoryService).terminate(refEq(getUserInfo()), eq(EXPLORATORY_NAME));
+ verify(exploratoryService).terminate(refEq(getUserInfo()), eq(PROJECT), eq(EXPLORATORY_NAME));
verifyNoMoreInteractions(securityService, schedulerJobDAO, computationalService, exploratoryService);
}
@@ -1085,8 +1104,11 @@
private UserInstanceDTO getUserInstanceDTO() {
UserComputationalResource computationalResource = new UserComputationalResource();
computationalResource.setStatus("running");
- return new UserInstanceDTO().withUser(USER).withExploratoryName(EXPLORATORY_NAME)
- .withResources(singletonList(computationalResource));
+ return new UserInstanceDTO()
+ .withUser(USER)
+ .withExploratoryName(EXPLORATORY_NAME)
+ .withResources(singletonList(computationalResource))
+ .withProject(PROJECT);
}
private AwsComputationalResource getComputationalResource(DataEngineType dataEngineType,