# -*- mode: ruby -*-
# vi: set ft=ruby :
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "yaml"
CONF = YAML.load_file(File.join(File.dirname(__FILE__), "vagrantconfig.yaml"))
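# For reference, vagrantconfig.yaml is expected to define the keys read below
# (box, memory_size, number_cpus, repo, distro, num_instances, components,
# enable_local_repo, run_smoke_tests, smoke_test_components). A minimal sketch
# with purely illustrative values:
#
#   box: "<some centos vagrant box>"
#   memory_size: 4096
#   number_cpus: 1
#   repo: "http://bigtop.s3.amazonaws.com/releases/0.7.0/redhat/6/x86_64"
#   distro: centos
#   num_instances: 1
#   components: [hadoop, yarn]
#   enable_local_repo: false
#   run_smoke_tests: false
#   smoke_test_components: [mapreduce, pig]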
# Override vagrant configurations using environment variables
keys = CONF.keys
keys.each do |k|
  if ENV[k.upcase] != nil then
    puts "Override from environment variable: " + k.upcase + " = " + ENV[k.upcase]
    # Treat purely numeric overrides as integers, everything else as strings
    if /^\d+$/.match(ENV[k.upcase])
      CONF[k] = Integer(ENV[k.upcase])
    else
      CONF[k] = ENV[k.upcase]
    end
  end
end
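# For example (illustrative values), running
#   NUM_INSTANCES=3 REPO=http://bigtop.s3.amazonaws.com/releases/0.7.0/redhat/6/x86_64 vagrant up
# overrides num_instances and repo from the shell; purely numeric values are converted to integers.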
# Repository
repo = CONF['repo']
# Example for testing a release candidate:
# repo = "http://bigtop.s3.amazonaws.com/releases/0.7.0/redhat/6/x86_64"
# Which Linux distribution to use. Currently only CentOS is tested.
distro = CONF['distro']
# Number of instances
num_instances = CONF['num_instances']
# Hadoop ecosystem components to deploy
components = CONF['components']
# Whether to run smoke tests
run_smoke_tests = CONF['run_smoke_tests']
# Smoke test components to run
smoke_test_components = CONF['smoke_test_components'].join(',')
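# For example (illustrative component names), a smoke_test_components list of
# [mapreduce, pig] is passed to smoke-tests.sh as the single argument "mapreduce,pig".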
# Whether to enable the local repo so that locally built (dev) packages can be installed.
# Force success: failures in this step are tolerated, since it is generally only used for development.
enable_local_repo = CONF['enable_local_repo']
puts "vagrant conf local repo enabled: #{enable_local_repo}"
# master node hostname
bigtop_master = "bigtop1.vagrant"
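# Note: given the hostname scheme below, the first VM (bigtop1) acts as the Hadoop head node.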
$script = <<SCRIPT
service iptables stop
service firewalld stop
chkconfig iptables off
# Remove the 127.0.0.1 entry, since vagrant's hostname setting would otherwise map the FQDN to it,
# which misleads some daemons into binding to 127.0.0.1 instead of the public or private IP
sed -i "/127.0.0.1/d" /etc/hosts
echo "Bigtop yum repo = #{repo}"
# Prepare the host manipulation files needed in the standalone box
cp /bigtop-home/provisioner/vagrant/config_hosts /etc/init.d
cp /bigtop-home/provisioner/vagrant/gen_hosts.sh /usr/bin
chkconfig --add config_hosts
# Prepare puppet configuration file
mkdir -p /etc/puppet/hieradata
cp /bigtop-home/bigtop-deploy/puppet/hiera.yaml /etc/puppet
cp -r /bigtop-home/bigtop-deploy/puppet/hieradata/bigtop/ /etc/puppet/hieradata/
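# For reference, with the defaults above the generated site.yaml begins with
# "bigtop::hadoop_head_node: bigtop1.vagrant"; the repo URI and component list are
# interpolated from vagrantconfig.yaml by the Vagrantfile.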
cat > /etc/puppet/hieradata/site.yaml << EOF
bigtop::hadoop_head_node: #{bigtop_master}
hadoop::hadoop_storage_dirs: [/data/1, /data/2]
bigtop::bigtop_repo_uri: #{repo}
hadoop_cluster_node::cluster_components: #{components}
hadoop::common_hdfs::testonly_hdfs_sshkeys: "yes"
EOF
SCRIPT
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # Manage /etc/hosts with the vagrant-hostmanager plugin (https://github.com/smdahlen/vagrant-hostmanager)
  config.hostmanager.enabled = true
  # Use vagrant-cachier to cache packages locally (https://github.com/fgrehm/vagrant-cachier)
  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end
  # Nodes definition
  (1..num_instances).each do |i|
    config.vm.define "bigtop#{i}" do |bigtop|
      bigtop.vm.box = CONF['box']
      bigtop_hostname = "bigtop#{i}"
      bigtop_fqdn = "#{bigtop_hostname}.vagrant"
      bigtop_ip = "10.10.10.1#{i}"
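      # With this addressing scheme bigtop1 gets 10.10.10.11, bigtop2 gets 10.10.10.12, and so on.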
      bigtop.vm.provider :virtualbox do |vb|
        vb.customize ["modifyvm", :id, "--memory", CONF['memory_size']]
        vb.customize ["modifyvm", :id, "--cpus", CONF['number_cpus']]
      end
      bigtop.vm.network :private_network, ip: bigtop_ip
      bigtop.vm.hostname = bigtop_fqdn
      # Two levels up is the bigtop "home" directory;
      # the current directory has the puppet recipes we need for provisioning.
      bigtop.vm.synced_folder "../../", "/bigtop-home"
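      # Note: shell provisioner paths such as ../../bigtop_toolchain/... are resolved on the
      # host relative to this Vagrantfile, while /bigtop-home is the same source tree as seen
      # from inside the guest.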
      # We also add the bigtop-home output/ dir, so that locally built RPMs will be available.
      puts "Adding output/ repo? #{enable_local_repo}"
      # Install Puppet
      bigtop.vm.provision :shell do |shell|
        shell.path = "../../bigtop_toolchain/bin/puppetize.sh"
      end
      # Carry on with the installation
      bigtop.vm.provision :shell do |shell|
        shell.path = "../utils/setup-env-" + distro + ".sh"
        shell.args = ["#{enable_local_repo}"]
      end
      bigtop.vm.provision "shell", inline: $script
      # Add the IP-to-FQDN and hostname mappings to /etc/hosts
      bigtop.hostmanager.aliases = "#{bigtop_fqdn} #{bigtop_hostname}"
      # Deploy Apache Hadoop and its ecosystem
      bigtop.vm.provision :puppet do |puppet|
        puppet.module_path = "../../bigtop-deploy/puppet/modules/"
        puppet.manifests_path = "../../bigtop-deploy/puppet/manifests/"
        puppet.manifest_file = "."
        puppet.options = "--parser future"
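        # "--parser future" selects the future (Puppet 4 style) parser on Puppet 3.x,
        # presumably because the deploy manifests rely on its newer language constructs.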
      end
      if run_smoke_tests then
        if i == num_instances then
          puts "Creating provisioner directive for running smoke tests"
          bigtop.vm.provision :shell do |shell|
            shell.path = "../utils/smoke-tests.sh"
            shell.args = ["#{smoke_test_components}"]
          end
        else
          puts "Not creating the smoke test provisioner yet... tests only run on the last vm (this is vm #{i} of #{num_instances})"
        end
      end
    end
  end
end