Merge pull request #936 from ustcweizhou/agent-name

Add agent name in received response

Before change:

2015-10-15 12:55:34,268 DEBUG [c.c.a.t.Request] (RouterStatusMonitor-1:ctx-d2b917a4) Seq 20-2693152577167557361: Sending  { Cmd , MgmtId: 345051313197, via: 20(KVM015), Ver: v1, Flags: 100011, [{"com.cloud.agent.api.routing.GetRouterAlertsCommand":{"previousAlertTimeStamp":"1970-01-01 00:00:00","accessDetails":{"router.ip":"169.254.3.13"},"wait":0}}] }
2015-10-15 12:55:34,416 DEBUG [c.c.a.t.Request] (AgentManager-Handler-7:null) Seq 20-2693152577167557361: Processing:  { Ans: , MgmtId: 345051313197, via: 20, Ver: v1, Flags: 10, [{"com.cloud.agent.api.GetRouterAlertsAnswer":{"result":true,"wait":0}}] }
2015-10-15 12:55:34,416 DEBUG [c.c.a.t.Request] (RouterStatusMonitor-1:ctx-d2b917a4) Seq 20-2693152577167557361: Received:  { Ans: , MgmtId: 345051313197, via: 20, Ver: v1, Flags: 10, { GetRouterAlertsAnswer } }

After change:

2015-10-15 13:31:04,562 DEBUG [c.c.a.t.Request] (RouterStatusMonitor-1:ctx-cf9a7691) Seq 22-6444651066767179831: Sending  { Cmd , MgmtId: 345051313197, via: 22(node12), Ver: v1, Flags: 100011, [{"com.cloud.agent.api.routing.GetRouterAlertsCommand":{"previousAlertTimeStamp":"1970-01-01 00:00:00","accessDetails":{"router.ip":"169.254.3.89"},"wait":0}}] }
2015-10-15 13:31:04,676 DEBUG [c.c.a.t.Request] (AgentManager-Handler-4:null) Seq 22-6444651066767179831: Processing:  { Ans: , MgmtId: 345051313197, via: 22, Ver: v1, Flags: 10, [{"com.cloud.agent.api.GetRouterAlertsAnswer":{"result":true,"wait":0}}] }
2015-10-15 13:31:04,677 DEBUG [c.c.a.t.Request] (RouterStatusMonitor-1:ctx-cf9a7691) Seq 22-6444651066767179831: Received:  { Ans: , MgmtId: 345051313197, via: 22(node12), Ver: v1, Flags: 10, { GetRouterAlertsAnswer } }
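
In effect, the change appends the agent's name, when it can be resolved, to the "via" field of the logged response, so "via: 22" becomes "via: 22(node12)". A minimal Python sketch of that formatting (illustrative only; the real change lives in the Java logging path of c.c.a.t.Request, and the resolver here is hypothetical):

    def format_via(host_id, resolve_name):
        """Render 22 as '22(node12)' when the agent name resolves, else '22'."""
        name = resolve_name(host_id)  # hypothetical resolver; may return None
        return "%s(%s)" % (host_id, name) if name else str(host_id)

    assert format_via(22, lambda _: "node12") == "22(node12)"
    assert format_via(20, lambda _: None) == "20"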

* pr/936:
  Modify description of startvm in DeployVMCmd
  Add agent name in received response

Signed-off-by: Remi Bergsma <github@remi.nl>
diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties
index 4bc96a3..cbf66ac 100644
--- a/client/WEB-INF/classes/resources/messages.properties
+++ b/client/WEB-INF/classes/resources/messages.properties
@@ -1830,7 +1830,7 @@
 message.download.volume.confirm=Please confirm that you want to download this volume.
 message.download.volume=Please click <a href\="\#">00000</a> to download volume
 message.edit.account=Edit ("-1" indicates no limit to the amount of resources create)
-message.edit.confirm=Please confirm that your changes before clicking "Save".
+message.edit.confirm=Please confirm your changes before clicking "Save".
 message.edit.limits=Please specify limits to the following resources.  A "-1" indicates no limit to the amount of resources create.
 message.edit.traffic.type=Please specify the traffic label you want associated with this traffic type.
 message.enable.account=Please confirm that you want to enable this account.
@@ -2177,4 +2177,4 @@
 message.removed.ssh.key.pair=Removed a SSH Key Pair
 message.please.select.ssh.key.pair.use.with.this.vm=Please select a ssh key pair you want this VM to use:
 message.configure.firewall.rules.allow.traffic=Configure the rules to allow Traffic
-message.configure.firewall.rules.block.traffic=Configure the rules to block Traffic
\ No newline at end of file
+message.configure.firewall.rules.block.traffic=Configure the rules to block Traffic
diff --git a/packaging/centos63/cloud-ipallocator.rc b/packaging/centos63/cloud-ipallocator.rc
index d26287d..d3eadec 100755
--- a/packaging/centos63/cloud-ipallocator.rc
+++ b/packaging/centos63/cloud-ipallocator.rc
@@ -25,7 +25,7 @@
 
 # set environment variables
 
-SHORTNAME=`basename $0`
+SHORTNAME="$(basename $(readlink -f $0))"
 PIDFILE=/var/run/"$SHORTNAME".pid
 LOCKFILE=/var/lock/subsys/"$SHORTNAME"
 LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log
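
The switch from basename $0 to basename "$(readlink -f $0)" resolves symlinks first, so a service invoked through a symlink still derives its PID and lock file names from the real script. A hedged Python sketch of the same idea (the symlink path below is hypothetical):

    import os

    def short_name(argv0):
        """Resolve symlinks before taking the basename, mirroring
        basename "$(readlink -f $0)" in the rc scripts above."""
        return os.path.basename(os.path.realpath(argv0))

    # With /etc/init.d/ipallocator symlinked to cloud-ipallocator.rc
    # (hypothetical), short_name("/etc/init.d/ipallocator") yields
    # "cloud-ipallocator.rc" regardless of how the script was invoked.
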
diff --git a/packaging/centos63/cloud-management.rc b/packaging/centos63/cloud-management.rc
index 6d28748..f5ed7a8 100755
--- a/packaging/centos63/cloud-management.rc
+++ b/packaging/centos63/cloud-management.rc
@@ -42,7 +42,7 @@
 fi
 
 
-NAME="$(basename $0)"
+NAME="$(basename $(readlink -f $0))"
 export SERVICE_NAME="$NAME"
 stop() {
 	SHUTDOWN_WAIT="30"
diff --git a/packaging/centos7/cloud-ipallocator.rc b/packaging/centos7/cloud-ipallocator.rc
index d26287d..d3eadec 100755
--- a/packaging/centos7/cloud-ipallocator.rc
+++ b/packaging/centos7/cloud-ipallocator.rc
@@ -25,7 +25,7 @@
 
 # set environment variables
 
-SHORTNAME=`basename $0`
+SHORTNAME="$(basename $(readlink -f $0))"
 PIDFILE=/var/run/"$SHORTNAME".pid
 LOCKFILE=/var/lock/subsys/"$SHORTNAME"
 LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log
diff --git a/packaging/fedora20/cloud-ipallocator.rc b/packaging/fedora20/cloud-ipallocator.rc
index d26287d..d3eadec 100755
--- a/packaging/fedora20/cloud-ipallocator.rc
+++ b/packaging/fedora20/cloud-ipallocator.rc
@@ -25,7 +25,7 @@
 
 # set environment variables
 
-SHORTNAME=`basename $0`
+SHORTNAME="$(basename $(readlink -f $0))"
 PIDFILE=/var/run/"$SHORTNAME".pid
 LOCKFILE=/var/lock/subsys/"$SHORTNAME"
 LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log
diff --git a/packaging/fedora20/cloud-management.rc b/packaging/fedora20/cloud-management.rc
index 6d28748..f5ed7a8 100755
--- a/packaging/fedora20/cloud-management.rc
+++ b/packaging/fedora20/cloud-management.rc
@@ -42,7 +42,7 @@
 fi
 
 
-NAME="$(basename $0)"
+NAME="$(basename $(readlink -f $0))"
 export SERVICE_NAME="$NAME"
 stop() {
 	SHUTDOWN_WAIT="30"
diff --git a/packaging/fedora21/cloud-ipallocator.rc b/packaging/fedora21/cloud-ipallocator.rc
index d26287d..d3eadec 100755
--- a/packaging/fedora21/cloud-ipallocator.rc
+++ b/packaging/fedora21/cloud-ipallocator.rc
@@ -25,7 +25,7 @@
 
 # set environment variables
 
-SHORTNAME=`basename $0`
+SHORTNAME="$(basename $(readlink -f $0))"
 PIDFILE=/var/run/"$SHORTNAME".pid
 LOCKFILE=/var/lock/subsys/"$SHORTNAME"
 LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log
diff --git a/packaging/fedora21/cloud-management.rc b/packaging/fedora21/cloud-management.rc
index 6d28748..f5ed7a8 100755
--- a/packaging/fedora21/cloud-management.rc
+++ b/packaging/fedora21/cloud-management.rc
@@ -42,7 +42,7 @@
 fi
 
 
-NAME="$(basename $0)"
+NAME="$(basename $(readlink -f $0))"
 export SERVICE_NAME="$NAME"
 stop() {
 	SHUTDOWN_WAIT="30"
diff --git a/server/src/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/com/cloud/network/StorageNetworkManagerImpl.java
index 76a51d9..1e6616f 100644
--- a/server/src/com/cloud/network/StorageNetworkManagerImpl.java
+++ b/server/src/com/cloud/network/StorageNetworkManagerImpl.java
@@ -104,20 +104,19 @@
         String insertSql =
             "INSERT INTO `cloud`.`op_dc_storage_network_ip_address` (range_id, ip_address, mac_address, taken) VALUES (?, ?, (select mac_address from `cloud`.`data_center` where id=?), ?)";
         String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?";
-        try (Connection conn = txn.getConnection();) {
-            while (startIPLong <= endIPLong) {
-                try (PreparedStatement stmt_insert = conn.prepareStatement(insertSql); ) {
-                    stmt_insert.setLong(1, rangeId);
-                    stmt_insert.setString(2, NetUtils.long2Ip(startIPLong++));
-                    stmt_insert.setLong(3, zoneId);
-                    stmt_insert.setNull(4, java.sql.Types.DATE);
-                    stmt_insert.executeUpdate();
-                }
+        Connection conn = txn.getConnection();
+        while (startIPLong <= endIPLong) {
+            try (PreparedStatement stmt_insert = conn.prepareStatement(insertSql);) {
+                stmt_insert.setLong(1, rangeId);
+                stmt_insert.setString(2, NetUtils.long2Ip(startIPLong++));
+                stmt_insert.setLong(3, zoneId);
+                stmt_insert.setNull(4, java.sql.Types.DATE);
+                stmt_insert.executeUpdate();
+            }
 
-                try (PreparedStatement stmt_update = txn.prepareStatement(updateSql);) {
-                    stmt_update.setLong(1, zoneId);
-                    stmt_update.executeUpdate();
-                }
+            try (PreparedStatement stmt_update = txn.prepareStatement(updateSql);) {
+                stmt_update.setLong(1, zoneId);
+                stmt_update.executeUpdate();
             }
         }
     }
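
The Java change above stops wrapping the transaction's Connection in try-with-resources, so the method no longer closes a connection it does not own; only the per-statement PreparedStatements are auto-closed. A hedged Python sketch of the same ownership rule, using sqlite3 as a stand-in:

    import sqlite3
    from contextlib import closing

    def populate(conn, rows):
        """The connection is owned by the caller (the 'transaction');
        only the per-statement cursors are closed here."""
        for row in rows:
            with closing(conn.cursor()) as cur:
                cur.execute("INSERT INTO t VALUES (?)", row)

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE t (v INTEGER)")
    populate(conn, [(1,), (2,)])
    conn.commit()  # the lifecycle stays with the owner, not populate()
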
diff --git a/setup/db/db/schema-452to460.sql b/setup/db/db/schema-452to460.sql
index 5887e53..af16849 100644
--- a/setup/db/db/schema-452to460.sql
+++ b/setup/db/db/schema-452to460.sql
@@ -403,6 +403,8 @@
   CONSTRAINT `fk_external_bigswitch_bcf_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
+UPDATE `cloud`.`host` SET `resource`='com.cloud.hypervisor.xenserver.resource.XenServer600Resource' WHERE `resource`='com.cloud.hypervisor.xenserver.resource.XenServer602Resource';
+
 CREATE TABLE `cloud`.`ldap_trust_map` (
   `id` int unsigned NOT NULL AUTO_INCREMENT,
   `domain_id` bigint unsigned NOT NULL,
@@ -413,3 +415,6 @@
   UNIQUE KEY `uk_ldap_trust_map__domain_id` (`domain_id`),
   CONSTRAINT `fk_ldap_trust_map__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain` (`id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Red Hat Enterprise Linux 7', 245, utc_timestamp(), 0);
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'CentOS 7', 246, utc_timestamp(), 0);
diff --git a/systemvm/patches/debian/config/etc/init.d/cloud-early-config b/systemvm/patches/debian/config/etc/init.d/cloud-early-config
index 79a85e7..6fbedff 100755
--- a/systemvm/patches/debian/config/etc/init.d/cloud-early-config
+++ b/systemvm/patches/debian/config/etc/init.d/cloud-early-config
@@ -117,18 +117,31 @@
           if [ ! -e /dev/vport0p1 ]; then
             log_it "/dev/vport0p1 not loaded, perhaps guest kernel is too old." && exit 2
           fi
-          while [ -z "$cmd" ]; do
-            while read line; do
-              if [[ $line == cmdline:* ]]; then
-                cmd=${line//cmdline:/}
+
+	      local factor=2
+	      local progress=1
+		  for i in {1..5}
+		  do
+	        while read line; do
+	          if [[ $line == cmdline:* ]]; then
+	            cmd=${line//cmdline:/}
                 echo $cmd > /var/cache/cloud/cmdline
-              elif [[ $line == pubkey:* ]]; then
-                pubkey=${line//pubkey:/}
-                echo $pubkey > /var/cache/cloud/authorized_keys
-                echo $pubkey > /root/.ssh/authorized_keys
+	          elif [[ $line == pubkey:* ]]; then
+	            pubkey=${line//pubkey:/}
+	            echo $pubkey > /var/cache/cloud/authorized_keys
+	            echo $pubkey > /root/.ssh/authorized_keys
               fi
-            done < /dev/vport0p1
-	  done
+	        done < /dev/vport0p1
+	        # In case of reboot we do not send the boot args again.
+	        # So, no need to wait for them, as the boot args are already set at startup
+	        if [ -s /var/cache/cloud/cmdline  ]
+	        then
+              log_it "Found a non-empty cmdline file. Will now exit the loop and proceed with configuration."
+              break;
+            fi
+            sleep ${progress}s
+            progress=$[ progress * factor ]
+		  done
           chmod go-rwx /root/.ssh/authorized_keys
           ;;
      vmware)
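
The rewritten loop above retries reading the boot args with an exponentially growing sleep (1s, 2s, 4s, ...) and bails out early once a non-empty cmdline file exists, which covers the reboot case where the args were already delivered. A minimal Python sketch of that backoff (the read helper is hypothetical):

    import os
    import time

    def wait_for_cmdline(read_once, path="/var/cache/cloud/cmdline",
                         attempts=5, factor=2):
        """Retry with exponential backoff; stop early once the cmdline
        file is non-empty (e.g. it survived a reboot)."""
        delay = 1
        for _ in range(attempts):
            read_once()  # hypothetical: one pass over /dev/vport0p1
            if os.path.isfile(path) and os.path.getsize(path) > 0:
                return True
            time.sleep(delay)
            delay *= factor
        return False
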
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/checkrouter.sh b/systemvm/patches/debian/config/opt/cloud/bin/checkrouter.sh
index 12b2da4..f05b440 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/checkrouter.sh
+++ b/systemvm/patches/debian/config/opt/cloud/bin/checkrouter.sh
@@ -16,9 +16,20 @@
 # specific language governing permissions and limitations
 # under the License.
 
-STATUS=$(cat /etc/cloudstack/cmdline.json | grep redundant_state | awk '{print $2;}' | sed -e 's/[,\"]//g')
-if [ "$?" -ne "0" ]
+STATUS=UNKNOWN
+INTERFACE=eth1
+ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g')
+if [ "$ROUTER_TYPE" = "router" ]
 then
-	   STATUS=MASTER
+    INTERFACE=eth2
 fi
-echo "Status: ${STATUS}"
+
+ETH1_STATE=$(ip addr | grep "$INTERFACE" | grep state | awk '{print $9;}')
+if [ "$ETH1_STATE" = "UP" ]
+then
+    STATUS=MASTER
+elif [ "$ETH1_STATE" = "DOWN" ]
+then
+    STATUS=BACKUP
+fi
+echo "Status: ${STATUS}"
\ No newline at end of file
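
checkrouter.sh now derives the status from the link state of the relevant interface (eth2 when the cmdline type is "router", eth1 otherwise) instead of the cached redundant_state field. The mapping, as a tiny Python sketch:

    def router_status(link_state):
        """Map an interface state (as printed by 'ip addr') to a status."""
        return {"UP": "MASTER", "DOWN": "BACKUP"}.get(link_state, "UNKNOWN")

    assert router_status("UP") == "MASTER"
    assert router_status("DOWN") == "BACKUP"
    assert router_status("") == "UNKNOWN"
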
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/configure.py b/systemvm/patches/debian/config/opt/cloud/bin/configure.py
index 014e294..8c39f75 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/configure.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/configure.py
@@ -481,11 +481,11 @@
             file.addeq("  dpddelay=30")
             file.addeq("  dpdtimeout=120")
             file.addeq("  dpdaction=restart")
-        file.commit()
         secret = CsFile(vpnsecretsfile)
         secret.search("%s " % leftpeer, "%s %s: PSK \"%s\"" % (leftpeer, rightpeer, obj['ipsec_psk']))
-        secret.commit()
         if secret.is_changed() or file.is_changed():
+            secret.commit()
+            file.commit()
             logging.info("Configured vpn %s %s", leftpeer, rightpeer)
             CsHelper.execute("ipsec auto --rereadall")
             CsHelper.execute("ipsec --add vpn-%s" % rightpeer)
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
index 074a63f..b80187a 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
@@ -95,22 +95,6 @@
                 return ip
         return None
 
-    def check_if_link_exists(self,dev):
-        cmd="ip link show dev %s"%dev
-        result = CsHelper.execute(cmd)
-        if(len(result) != 0):
-           return True
-        else:
-           return False
-
-    def check_if_link_up(self,dev):
-        cmd="ip link show dev %s | tr '\n' ' ' | cut -d ' ' -f 9"%dev
-        result = CsHelper.execute(cmd)
-        if(result and result[0].lower() == "up"):
-            return True
-        else:
-            return False
-
     def process(self):
         for dev in self.dbag:
             if dev == "id":
@@ -118,11 +102,6 @@
             ip = CsIP(dev, self.config)
 
             for address in self.dbag[dev]:
-                #check if link is up
-                if not self.check_if_link_up(dev):
-                   cmd="ip link set %s up" % dev
-                   CsHelper.execute(cmd)
-
                 ip.setAddress(address)
 
                 if ip.configured():
@@ -328,7 +307,7 @@
             if " DOWN " in i:
                 cmd2 = "ip link set %s up" % self.getDevice()
                 # If redundant do not bring up public interfaces
-                # master.py and keepalived deal with tham
+                # master.py and keepalived will deal with them
                 if self.cl.is_redundant() and not self.is_public():
                     CsHelper.execute(cmd2)
                 # if not redundant bring everything up
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsApp.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsApp.py
index de53fe0..a0b4c6e 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsApp.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsApp.py
@@ -50,8 +50,8 @@
         file.search("Listen .*:80", "Listen %s:80" % (self.ip))
         file.search("Listen .*:443", "Listen %s:443" % (self.ip))
         file.search("ServerName.*", "\tServerName vhost%s.cloudinternal.com" % (self.dev))
-        file.commit()
         if file.is_changed():
+            file.commit()
             CsHelper.service("apache2", "restart")
 
         self.fw.append(["", "front",
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsDhcp.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsDhcp.py
index 234ed4c..02e7bd7 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsDhcp.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsDhcp.py
@@ -36,21 +36,23 @@
         self.preseed()
         self.cloud = CsFile(DHCP_HOSTS)
         self.conf = CsFile(CLOUD_CONF)
-        length = len(self.conf)
+
         for item in self.dbag:
             if item == "id":
                 continue
             self.add(self.dbag[item])
         self.write_hosts()
+        
         if self.cloud.is_changed():
             self.delete_leases()
+
         self.configure_server()
+
+        # We restart dnsmasq every time configure.py is called in order to avoid lease problems.
+        CsHelper.service("dnsmasq", "restart")
+
         self.conf.commit()
         self.cloud.commit()
-        if self.conf.is_changed():
-            CsHelper.service("dnsmasq", "restart")
-        elif self.cloud.is_changed():
-            CsHelper.hup_dnsmasq("dnsmasq", "dnsmasq")
 
     def configure_server(self):
         # self.conf.addeq("dhcp-hostsfile=%s" % DHCP_HOSTS)
@@ -131,8 +133,8 @@
         file.repopulate()
         for ip in self.hosts:
             file.add("%s\t%s" % (ip, self.hosts[ip]))
-        file.commit()
         if file.is_changed():
+            file.commit()
             logging.info("Updated hosts file")
         else:
             logging.debug("Hosts file unchanged")
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsFile.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsFile.py
index 319b48e..7829c0a 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsFile.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsFile.py
@@ -64,6 +64,9 @@
             handle.write(line)
         handle.close()
         logging.info("Wrote edited file %s" % self.filename)
+        self.config = list(self.new_config)
+        logging.info("Updated in-cache configuration for file %s" % self.filename)
+        
 
     def dump(self):
         for line in self.new_config:
@@ -160,4 +163,6 @@
 
 
     def compare(self, o):
-        return (isinstance(o, self.__class__) and set(self.config) == set(o.new_config))
+        result = (isinstance(o, self.__class__) and set(self.config) == set(o.config))
+        logging.debug("Comparison of CsFiles content is ==> %s" % result)
+        return result
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsLoadBalancer.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsLoadBalancer.py
index a288eac..d8f39dc 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsLoadBalancer.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsLoadBalancer.py
@@ -17,7 +17,6 @@
 import logging
 import os.path
 import re
-import shutil
 from cs.CsDatabag import CsDataBag
 from CsProcess import CsProcess
 from CsFile import CsFile
@@ -37,13 +36,14 @@
             return
         config = self.dbag['config'][0]['configuration']
         file1 = CsFile(HAPROXY_CONF_T)
-        file2 = CsFile(HAPROXY_CONF_P)
         file1.empty()
         for x in config:
             [file1.append(w, -1) for w in x.split('\n')]
+
+        file1.commit()
+        file2 = CsFile(HAPROXY_CONF_P)
         if not file2.compare(file1):
-            file1.commit()
-            shutil.copy2(HAPROXY_CONF_T, HAPROXY_CONF_P)
+            CsHelper.copy(HAPROXY_CONF_T, HAPROXY_CONF_P)
 
             proc = CsProcess(['/var/run/haproxy.pid'])
             if not proc.find():
@@ -55,13 +55,15 @@
 
         add_rules = self.dbag['config'][0]['add_rules']
         remove_rules = self.dbag['config'][0]['remove_rules']
-        self._configure_firewall(add_rules, remove_rules)
+        stat_rules = self.dbag['config'][0]['stat_rules']
+        self._configure_firewall(add_rules, remove_rules, stat_rules)
 
-    def _configure_firewall(self, add_rules, remove_rules):
+    def _configure_firewall(self, add_rules, remove_rules, stat_rules):
         firewall = self.config.get_fw()
 
         logging.debug("CsLoadBalancer:: configuring firewall. Add rules ==> %s" % add_rules)
         logging.debug("CsLoadBalancer:: configuring firewall. Remove rules ==> %s" % remove_rules)
+        logging.debug("CsLoadBalancer:: configuring firewall. Stat rules ==> %s" % stat_rules)
 
         for rules in add_rules:
             path = rules.split(':')
@@ -74,3 +76,9 @@
             ip = path[0]
             port = path[1]
             firewall.append(["filter", "", "-D INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
+
+        for rules in stat_rules:
+            path = rules.split(':')
+            ip = path[0]
+            port = path[1]
+            firewall.append(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py
index abe997c..7ae1bd4 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py
@@ -82,12 +82,10 @@
     def _redundant_on(self):
         guest = self.address.get_guest_if()
         # No redundancy if there is no guest network
-        if self.cl.is_master() or guest is None:
-            for obj in [o for o in self.address.get_ips() if o.is_public()]:
-                self.check_is_up(obj.get_device())
         if guest is None:
             self._redundant_off()
             return
+
         CsHelper.mkdir(self.CS_RAMDISK_DIR, 0755, False)
         CsHelper.mount_tmpfs(self.CS_RAMDISK_DIR)
         CsHelper.mkdir(self.CS_ROUTER_DIR, 0755, False)
@@ -102,10 +100,6 @@
             "%s/%s" % (self.CS_TEMPLATES_DIR, "keepalived.conf.templ"), self.KEEPALIVED_CONF)
         CsHelper.copy_if_needed(
             "%s/%s" % (self.CS_TEMPLATES_DIR, "checkrouter.sh.templ"), "/opt/cloud/bin/checkrouter.sh")
-        #The file is always copied so the RVR doesn't't get the wrong config.
-        #Concerning the r-VPC, the configuration will be applied in a different manner
-        CsHelper.copy(
-            "%s/%s" % (self.CS_TEMPLATES_DIR, "conntrackd.conf.templ"), self.CONNTRACKD_CONF)
 
         CsHelper.execute(
             'sed -i "s/--exec\ \$DAEMON;/--exec\ \$DAEMON\ --\ --vrrp;/g" /etc/init.d/keepalived')
@@ -127,12 +121,16 @@
                                 "        auth_type AH \n", "        auth_pass %s\n" % self.cl.get_router_password()])
         keepalived_conf.section(
             "virtual_ipaddress {", "}", self._collect_ips())
-        keepalived_conf.commit()
 
         # conntrackd configuration
-        connt = CsFile(self.CONNTRACKD_CONF)
+        conntrackd_template_conf = "%s/%s" % (self.CS_TEMPLATES_DIR, "conntrackd.conf.templ")
+        conntrackd_temp_bkp = "%s/%s" % (self.CS_TEMPLATES_DIR, "conntrackd.conf.templ.bkp")
+        
+        CsHelper.copy(conntrackd_template_conf, conntrackd_temp_bkp)
+        
+        conntrackd_tmpl = CsFile(conntrackd_template_conf)
         if guest is not None:
-            connt.section("Multicast {", "}", [
+            conntrackd_tmpl.section("Multicast {", "}", [
                           "IPv4_address 225.0.0.50\n",
                           "Group 3780\n",
                           "IPv4_interface %s\n" % guest.get_ip(),
@@ -140,12 +138,21 @@
                           "SndSocketBuffer 1249280\n",
                           "RcvSocketBuffer 1249280\n",
                           "Checksum on\n"])
-            connt.section("Address Ignore {", "}", self._collect_ignore_ips())
-            connt.commit()
+            conntrackd_tmpl.section("Address Ignore {", "}", self._collect_ignore_ips())
+            conntrackd_tmpl.commit()
 
-        if connt.is_changed():
+        conntrackd_conf = CsFile(self.CONNTRACKD_CONF)
+
+        is_equals = conntrackd_tmpl.compare(conntrackd_conf)
+        proc = CsProcess(['/etc/conntrackd/conntrackd.conf'])
+        if not proc.find() or not is_equals:
+            CsHelper.copy(conntrackd_template_conf, self.CONNTRACKD_CONF)
             CsHelper.service("conntrackd", "restart")
 
+        # Restore the template file and remove the backup.
+        CsHelper.copy(conntrackd_temp_bkp, conntrackd_template_conf)
+        CsHelper.execute("rm -rf %s" % conntrackd_temp_bkp)
+
         # Configure heartbeat cron job - runs every 30 seconds
         heartbeat_cron = CsFile("/etc/cron.d/heartbeat")
         heartbeat_cron.add("SHELL=/bin/bash", 0)
@@ -173,8 +180,9 @@
         conntrackd_cron.add("@reboot root service conntrackd start", -1)
         conntrackd_cron.commit()
 
-        proc = CsProcess(['/usr/sbin/keepalived', '--vrrp'])
+        proc = CsProcess(['/usr/sbin/keepalived'])
         if not proc.find() or keepalived_conf.is_changed():
+            keepalived_conf.commit()
             CsHelper.service("keepalived", "restart")
 
     def release_lock(self):
@@ -285,7 +293,6 @@
                     route.add_defaultroute(gateway)
                 except:
                     logging.error("ERROR getting gateway from device %s" % dev)
-                    
             else:
                 logging.error("Device %s was not ready could not bring it up" % dev)
 
@@ -300,6 +307,7 @@
         ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
         for o in ads:
             CsPasswdSvc(o.get_gateway()).restart()
+
         CsHelper.service("dnsmasq", "restart")
         self.cl.set_master_state(True)
         self.cl.save()
@@ -326,7 +334,7 @@
 
         In a DomR there will only ever be one address in a VPC there can be many
         The new code also gives the possibility to cloudstack to have a hybrid device
-        thet could function as a router and VPC router at the same time
+        that could function as a router and VPC router at the same time
         """
         lines = []
         for o in self.address.get_ips():
@@ -337,12 +345,12 @@
                 else:
                     str = "        %s brd %s dev %s\n" % (o.get_gateway_cidr(), o.get_broadcast(), o.get_device())
                 lines.append(str)
-                self.check_is_up(o.get_device())
         return lines
 
     def check_is_up(self, device):
         """ Ensure device is up """
         cmd = "ip link show %s | grep 'state DOWN'" % device
+
         for i in CsHelper.execute(cmd):
             if " DOWN " in i:
                 cmd2 = "ip link set %s up" % device
diff --git a/systemvm/patches/debian/config/opt/cloud/bin/master.py b/systemvm/patches/debian/config/opt/cloud/bin/master.py
index 41386f7..c3a1539 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/master.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/master.py
@@ -42,6 +42,9 @@
                     format=config.get_format())
 config.cmdline()
 cl = CsCmdLine("cmdline", config)
+# Update the configuration to set the state to backup and let keepalived decide who the real master is
+cl.set_master_state(False)
+cl.save()
 
 config.set_address()
 red = CsRedundant(config)
diff --git a/systemvm/patches/debian/config/opt/cloud/templates/checkrouter.sh.templ b/systemvm/patches/debian/config/opt/cloud/templates/checkrouter.sh.templ
index 12b2da4..f05b440 100755
--- a/systemvm/patches/debian/config/opt/cloud/templates/checkrouter.sh.templ
+++ b/systemvm/patches/debian/config/opt/cloud/templates/checkrouter.sh.templ
@@ -16,9 +16,20 @@
 # specific language governing permissions and limitations
 # under the License.
 
-STATUS=$(cat /etc/cloudstack/cmdline.json | grep redundant_state | awk '{print $2;}' | sed -e 's/[,\"]//g')
-if [ "$?" -ne "0" ]
+STATUS=UNKNOWN
+INTERFACE=eth1
+ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g')
+if [ "$ROUTER_TYPE" = "router" ]
 then
-	   STATUS=MASTER
+    INTERFACE=eth2
 fi
-echo "Status: ${STATUS}"
+
+ETH1_STATE=$(ip addr | grep "$INTERFACE" | grep state | awk '{print $9;}')
+if [ "$ETH1_STATE" = "UP" ]
+then
+    STATUS=MASTER
+elif [ "$ETH1_STATE" = "DOWN" ]
+then
+    STATUS=BACKUP
+fi
+echo "Status: ${STATUS}"
\ No newline at end of file
diff --git a/test/integration/component/test_vpc_redundant.py b/test/integration/component/test_vpc_redundant.py
index 6f82aec..600850b 100644
--- a/test/integration/component/test_vpc_redundant.py
+++ b/test/integration/component/test_vpc_redundant.py
@@ -37,8 +37,11 @@
 from marvin.lib.common import (get_domain,
                                get_zone,
                                get_template,
-                               list_routers)
-from marvin.lib.utils import cleanup_resources
+                               list_routers,
+                               list_hosts)
+from marvin.lib.utils import (cleanup_resources,
+                              get_process_status,
+                              get_host_credentials)
 import socket
 import time
 import inspect
@@ -236,7 +239,10 @@
         self.routers = []
         self.networks = []
         self.ips = []
+
         self.apiclient = self.testClient.getApiClient()
+        self.hypervisor = self.testClient.getHypervisorInfo()
+
         self.account = Account.create(
             self.apiclient,
             self.services["account"],
@@ -288,13 +294,59 @@
             len(self.routers), count,
             "Check that %s routers were indeed created" % count)
 
-    def check_master_status(self, count=2, showall=False):
+    def check_master_status(self,count=2, showall=False):
         vals = ["MASTER", "BACKUP", "UNKNOWN"]
         cnts = [0, 0, 0]
+
+        result = "UNKNOWN"
         self.query_routers(count, showall)
         for router in self.routers:
             if router.state == "Running":
-                cnts[vals.index(router.redundantstate)] += 1
+                hosts = list_hosts(
+                    self.apiclient,
+                    zoneid=router.zoneid,
+                    type='Routing',
+                    state='Up',
+                    id=router.hostid
+                )
+                self.assertEqual(
+                    isinstance(hosts, list),
+                    True,
+                    "Check list host returns a valid list"
+                )
+                host = hosts[0]
+
+                if self.hypervisor.lower() in ('vmware', 'hyperv'):
+                    result = str(get_process_status(
+                        self.apiclient.connection.mgtSvr,
+                        22,
+                        self.apiclient.connection.user,
+                        self.apiclient.connection.passwd,
+                        router.linklocalip,
+                        "sh /opt/cloud/bin/checkrouter.sh ",
+                        hypervisor=self.hypervisor
+                    ))
+                else:
+                    try:
+                        host.user, host.passwd = get_host_credentials(
+                            self.config, host.ipaddress)
+                        result = str(get_process_status(
+                            host.ipaddress,
+                            22,
+                            host.user,
+                            host.passwd,
+                            router.linklocalip,
+                            "sh /opt/cloud/bin/checkrouter.sh "
+                        ))
+
+                    except KeyError:
+                        self.skipTest(
+                            "Marvin configuration has no host credentials to\
+                                    check router services")
+            
+                if result.count(vals[0]) == 1:
+                    cnts[vals.index(vals[0])] += 1
+
         if cnts[vals.index('MASTER')] != 1:
             self.fail("No Master or too many master routers found %s" % cnts[vals.index('MASTER')])
 
@@ -458,26 +510,20 @@
         self.query_routers()
         self.networks.append(self.create_network(self.services["network_offering"], "10.1.1.1"))
         self.networks.append(self.create_network(self.services["network_offering_no_lb"], "10.1.2.1"))
-        time.sleep(30)
         self.check_master_status(2)
         self.add_nat_rules()
         self.do_vpc_test(False)
-        time.sleep(30)
         
         self.stop_router_by_type("MASTER")
-        # wait for the backup router to transit to master state
-        time.sleep(30)
         self.check_master_status(1)
         self.do_vpc_test(False)
 
         self.delete_nat_rules()
-        time.sleep(45)
         self.check_master_status(1)
         self.do_vpc_test(True)
 
         self.start_routers()
         self.add_nat_rules()
-        time.sleep(30)
         self.check_master_status(2)
         self.do_vpc_test(False)
 
@@ -488,7 +534,6 @@
         self.query_routers()
         self.networks.append(self.create_network(self.services["network_offering"], "10.1.1.1"))
         self.networks.append(self.create_network(self.services["network_offering_no_lb"], "10.1.2.1"))
-        time.sleep(30)
         self.check_master_status(2)
         self.add_nat_rules()
         self.do_default_routes_test()
@@ -510,7 +555,7 @@
                     time.sleep(5)
 
     def do_vpc_test(self, expectFail):
-        retries = 20
+        retries = 5
         if expectFail:
             retries = 2
         for o in self.networks:
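
The fixed sleeps in this test are dropped in favour of asking each router for its actual state via checkrouter.sh over SSH. A hedged sketch of the resulting check (fetch_states stands in for running get_process_status against every running router):

    def exactly_one_master(fetch_states):
        """True when exactly one running router reports MASTER;
        fetch_states is a hypothetical callable returning one status
        string per router, e.g. checkrouter.sh output over SSH."""
        return fetch_states().count("MASTER") == 1
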
diff --git a/test/integration/smoke/test_internal_lb.py b/test/integration/smoke/test_internal_lb.py
index 174782f..da845ea 100644
--- a/test/integration/smoke/test_internal_lb.py
+++ b/test/integration/smoke/test_internal_lb.py
@@ -16,63 +16,303 @@
 # under the License.
 """ Tests for configuring Internal Load Balancing Rules.
 """
-#Import Local Modules
-from marvin.codes import FAILED
-from marvin.cloudstackTestCase import *
-from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+# Import Local Modules
+from marvin.codes import PASS, FAILED
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import (cleanup_resources,
+                              get_process_status)
+from marvin.lib.base import (Domain,
+                             Account,
+                             Configurations,
+                             VPC,
+                             VpcOffering,
+                             ServiceOffering,
+                             NetworkOffering,
+                             Network,
+                             PublicIPAddress,
+                             NATRule,
+                             NetworkACL,
+                             LoadBalancerRule,
+                             ApplicationLoadBalancer,
+                             VirtualMachine,
+                             Template,
+                             FireWallRule,
+                             StaticNATRule,
+                             NetworkACLList
+                             )
+
+from marvin.sshClient import SshClient
+
+
+from marvin.lib.common import (get_zone,
+                               get_domain,
+                               get_template,
+                               list_network_offerings)
+
 from nose.plugins.attrib import attr
 
+import logging
+import time
+import math
+
+
+class Services:
+
+    """Test VPC network services - Internal Load Balancing Rules Test Data Class.
+    """
+
+    def __init__(self):
+        self.services = {
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "host1": None,
+            "host2": None,
+            "default_hypervisor": "kvm",
+            "compute_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+            },
+            "network_offering": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,Lb,PortForwarding,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "network_offering_internal_lb": {
+                "name": 'VPC Network Internal Lb offering',
+                "displaytext": 'VPC Network internal lb',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL,Lb',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceCapabilityList": {
+                    "Lb": {
+                        "SupportedLbIsolation": 'dedicated',
+                        "lbSchemes": 'internal'
+                    }
+                },
+                "serviceProviderList": {
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter',
+                    "Lb": 'InternalLbVm'
+                },
+                "egress_policy": "true",
+            },
+            "vpc_offering": {
+                "name": 'Redundant VPC off',
+                "displaytext": 'Redundant VPC off',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+                "serviceCapabilityList": {
+                    "SourceNat": {
+                        "RedundantRouter": 'true'
+                    }
+                },
+            },
+            "vpc": {
+                "name": "TestVPC",
+                "displaytext": "TestVPC",
+                "cidr": '10.1.0.0/16'
+            },
+            "network": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+                "netmask": '255.255.255.0'
+            },
+            "lbrule": {
+                "name": "SSH",
+                "alg": "roundrobin",
+                # Algorithm used for load balancing
+                "privateport": 22,
+                "publicport": 2222,
+                "openfirewall": False,
+                "startport": 22,
+                "endport": 2222,
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "lbrule_http": {
+                "name": "HTTP",
+                "alg": "roundrobin",
+                # Algorithm used for load balancing
+                "privateport": 80,
+                "publicport": 80,
+                "openfirewall": False,
+                "startport": 80,
+                "endport": 80,
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "natrule": {
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "http_rule": {
+                "privateport": 80,
+                "publicport": 80,
+                "startport": 80,
+                "endport": 80,
+                "cidrlist": '0.0.0.0/0',
+                "protocol": "TCP"
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "template_kvm": {
+                "name": "tiny-kvm",
+                "displaytext": "macchinina kvm",
+                "format": "qcow2",
+                "hypervisor": "kvm",
+                "ostype": "Other PV (64-bit)",
+                "url": "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-kvm.qcow2.bz2",
+                "requireshvm": "True",
+            },
+            "template_xen": {
+                "name": "tiny-xen",
+                "displaytext": "macchinina xen",
+                "format": "vhd",
+                "hypervisor": "xen",
+                "ostype": "Other (64-bit)",
+                "url": "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-xen.vhd.bz2",
+                "requireshvm": "True",
+            },
+        }
+
+
 class TestInternalLb(cloudstackTestCase):
+
     """Test Internal LB
     """
 
     @classmethod
     def setUpClass(cls):
+
+        cls.logger = logging.getLogger('TestInternalLb')
+        cls.stream_handler = logging.StreamHandler()
+        cls.logger.setLevel(logging.DEBUG)
+        cls.logger.addHandler(cls.stream_handler)
+
         testClient = super(TestInternalLb, cls).getClsTestClient()
         cls.apiclient = testClient.getApiClient()
-        cls.services = testClient.getParsedTestDataConfig()
+        cls.services = Services().services
 
         cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
         cls.domain = get_domain(cls.apiclient)
-        cls.service_offering = ServiceOffering.create(
+        cls.compute_offering = ServiceOffering.create(
             cls.apiclient,
-            cls.services["service_offerings"]["tiny"]
+            cls.services["compute_offering"]
         )
-        cls.account = Account.create(cls.apiclient, services=cls.services["account"])
-        cls.template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.services["ostype"]
-        )
+
+        cls.account = Account.create(
+            cls.apiclient, services=cls.services["account"])
+
+        if cls.services["default_hypervisor"] == "kvm":
+            cls.template = Template.register(cls.apiclient, cls.services["template_kvm"], cls.zone.id, hypervisor=cls.services[
+                                             "template_kvm"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
+        else:
+            cls.template = Template.register(cls.apiclient, cls.services["template_xen"], cls.zone.id, hypervisor=cls.services[
+                                             "template_xen"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
 
         if cls.template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+            assert False, "get_template() failed to return template"
 
-        cls.debug("Successfully created account: %s, id: \
-                   %s" % (cls.account.name,\
+        cls.logger.debug("Successfully created account: %s, id: \
+                   %s" % (cls.account.name,
                           cls.account.id))
+
         cls.cleanup = [cls.account]
+        return
 
-    @attr(tags=["smoke", "advanced"], required_hardware="true")
-    def test_internallb(self):
-        """Test create, delete, assign, remove of internal loadbalancer
-        """   
-           #1) Create and enable network offering with Internal Lb vm service
-        self.networkOffering = NetworkOffering.create(self.apiclient, self.services["network_offering_internal_lb"], conservemode=False)
-        #TODO: SIMENH:modify this test to verify lb rules by sending request from another tier
-        self.networkOffering.update(self.apiclient, state="Enabled")
+    def get_networkoffering_state(self, offering):
+        result = list_network_offerings(self.apiclient, id=offering.id)
+        if result:
+            offering = result[0]
+            return offering.state
+        else:
+            return None
 
-        #2) Create VPC and network in it
-        vpcOffering = VpcOffering.list(self.apiclient,isdefault=True)
-        self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found")
-        self.services["vpc"] = {}
-        self.services["vpc"]["name"] = "vpc-internallb"
-        self.services["vpc"]["displaytext"] = "vpc-internallb"
-        self.services["vpc"]["cidr"] = "10.1.1.0/24"
-        vpc = VPC.create(
+    def create_and_enable_network_serviceoffering(self, services):
+
+        try:
+            # Create offering
+            offering = NetworkOffering.create(
+                self.apiclient, services, conservemode=False)
+
+            self.assertIsNotNone(offering, "Failed to create network offering")
+            self.logger.debug("Created network offering: %s" % offering.id)
+
+            if offering:
+                # Enable offering
+                offering.update(self.apiclient, state="Enabled")
+                self.assertEqual(self.get_networkoffering_state(
+                    offering), "Enabled", "Failed to enable network offering")
+
+                self.logger.debug("Enabled network offering: %s" % offering.id)
+                return offering
+
+        except Exception as e:
+            self.fail("Failed to create and enable network offering because of %s" % e)
+
+    def create_vpc(self, name, cidr):
+        self.logger.debug("Creating VPC %s with CIDR %s" % (name, cidr))
+        try:
+            vpcOffering = VpcOffering.list(self.apiclient, isdefault=True)
+            self.assert_(vpcOffering is not None and len(
+                vpcOffering) > 0, "No VPC offerings found")
+
+            self.services["vpc"] = {}
+            self.services["vpc"]["name"] = name
+            self.services["vpc"]["displaytext"] = name
+            self.services["vpc"]["cidr"] = cidr
+
+            vpc = VPC.create(
                 apiclient=self.apiclient,
                 services=self.services["vpc"],
                 networkDomain="vpc.internallb",
@@ -80,61 +320,425 @@
                 zoneid=self.zone.id,
                 account=self.account.name,
                 domainid=self.domain.id
+            )
+            self.assertIsNotNone(vpc, "VPC creation failed")
+            self.logger.debug("Created VPC %s" % vpc.id)
+            return vpc
+
+        except Exception as e:
+            self.fail("Failed to create VPC: %s due to %s" % (name, e))
+
+    def create_network_tier(self, name, vpcid, gateway, network_offering):
+        self.services["network"]["name"] = name
+        self.services["network"]["displaytext"] = name
+
+        default_acl = NetworkACLList.list(self.apiclient, name="default_allow")[0]
+
+        try:
+            network = Network.create(
+                apiclient=self.apiclient,
+                services=self.services["network"],
+                accountid=self.account.name,
+                domainid=self.domain.id,
+                networkofferingid=network_offering.id,
+                zoneid=self.zone.id,
+                vpcid=vpcid,
+                gateway=gateway,
+                netmask=self.services["network"]["netmask"],
+                aclid=default_acl.id
+            )
+            self.assertIsNotNone(network, "Network failed to create")
+            self.logger.debug(
+                "Created network %s in VPC %s" % (network.id, vpcid))
+
+            return network
+
+        except Exception as e:
+            raise Exception("Create network failed: %s" % e)
+
+    def deployvm_in_network(self, vpc, networkid):
+
+        try:
+            self.services["virtual_machine"]["networkids"] = networkid
+            vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
+                                       templateid=self.template.id,
+                                       zoneid=self.zone.id,
+                                       accountid=self.account.name,
+                                       domainid=self.domain.id,
+                                       serviceofferingid=self.compute_offering.id,
+                                       hypervisor=self.services[
+                                           "template_kvm"]["hypervisor"]
+                                       )
+            self.assertIsNotNone(
+                vm, "Failed to deploy vm in network: %s" % networkid)
+            self.assert_(vm.state == 'Running', "VM is not running")
+            self.logger.debug("Deployed VM id: %s in VPC %s" % (vm.id, vpc.id))
+
+            return vm
+
+        except Exception as e:
+            raise Exception("Deployment failed of VM: %s" % e)
+
+    def create_internal_loadbalancer(self, intport, sport, algorithm, networkid):
+        try:
+            # 5) Create an Internal Load Balancer
+            applb = ApplicationLoadBalancer.create(self.apiclient, services=self.services,
+                                                   name="lbrule",
+                                                   sourceport=sport,
+                                                   instanceport=intport,
+                                                   algorithm=algorithm,
+                                                   scheme="Internal",
+                                                   sourcenetworkid=networkid,
+                                                   networkid=networkid
+                                                   )
+
+            self.assertIsNotNone(applb, "Failed to create loadbalancer")
+            self.logger.debug("Created LB %s in VPC" % applb.id)
+
+            return applb
+
+        except Exception as e:
+            self.fail(e)
+
+    def acquire_publicip(self, vpc, network):
+        self.logger.debug(
+            "Associating public IP for network: %s" % network.name)
+        public_ip = PublicIPAddress.create(
+            self.apiclient,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            domainid=self.account.domainid,
+            networkid=network.id,
+            vpcid=vpc.id
         )
-        self.assert_(vpc is not None, "VPC creation failed")
-        self.services["vpcnetwork"] = {}
-        self.services["vpcnetwork"]["name"] = "vpcntwk"
-        self.services["vpcnetwork"]["displaytext"] = "vpcntwk"
-        ntwk = Network.create(
+        self.assertIsNotNone(public_ip, "Failed to acquire public IP")
+        self.logger.debug("Associated %s with network %s" % (
+            public_ip.ipaddress.ipaddress,
+            network.id
+        ))
+        return public_ip
+
+    def create_natrule(self, vpc, vm, public_port, private_port, public_ip, network, services=None):
+        self.logger.debug("Creating NAT rule in network for vm with public IP")
+        if not services:
+            self.services["natrule"]["privateport"] = private_port
+            self.services["natrule"]["publicport"] = public_port
+            self.services["natrule"]["startport"] = public_port
+            self.services["natrule"]["endport"] = public_port
+            services = self.services["natrule"]
+
+        nat_rule = NATRule.create(
             apiclient=self.apiclient,
-            services=self.services["vpcnetwork"],
-            accountid=self.account.name,
-            domainid=self.domain.id,
-            networkofferingid=self.networkOffering.id,
-            zoneid=self.zone.id,
-            vpcid=vpc.id,
-            gateway="10.1.1.1",
-            netmask="255.255.255.192"
+            services=services,
+            ipaddressid=public_ip.ipaddress.id,
+            virtual_machine=vm,
+            networkid=network.id
         )
-        self.assertIsNotNone(ntwk, "Network failed to create")
-        self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id))
+        self.assertIsNotNone(
+            nat_rule, "Failed to create NAT Rule for %s" % public_ip.ipaddress.ipaddress)
+        self.logger.debug(
+            "Adding NetworkACL rules to make NAT rule accessible")
 
-        #3) Deploy a vm
-        self.services["virtual_machine"]["networkids"] = ntwk.id
-        vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
-            templateid=self.template.id,
-            zoneid=self.zone.id,
-            accountid=self.account.name,
-            domainid= self.domain.id,
-            serviceofferingid=self.service_offering.id,
-        )
-        self.assert_(vm is not None, "VM failed to deploy")
-        self.assert_(vm.state == 'Running', "VM is not running")
-        self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id))
-        
-        #4) Create an Internal Load Balancer
-        applb = ApplicationLoadBalancer.create(self.apiclient, services=self.services,
-                name="lbrule",
-                sourceport=22,
-                instanceport=22,
-                algorithm="roundrobin",
-                scheme="internal",
-                sourcenetworkid=ntwk.id,
-                networkid=ntwk.id)
+        vm.ssh_ip = nat_rule.ipaddress
+        vm.public_ip = nat_rule.ipaddress
+        vm.public_port = int(public_port)
+        return nat_rule
 
-        #5) Assign the VM to the Internal Load Balancer
-        applb.assign(self.apiclient, vms=[vm])
+    def get_ssh_client(self, vm, retries):
+        """ Setup ssh client connection and return connection
+        vm requires attributes public_ip, public_port, username, password """
 
-        #6) Remove the vm from the Interanl Load Balancer
-        applb.remove(self.apiclient, vms=[vm])
+        try:
+            ssh_client = SshClient(
+                vm.public_ip,
+                vm.public_port,
+                vm.username,
+                vm.password,
+                retries
+            )
+        except Exception as e:
+            self.fail("Unable to create ssh connection: %s" % e)
 
-        #7) Delete the Load Balancer
+        self.assertIsNotNone(
+            ssh_client, "Failed to setup ssh connection to vm=%s on public_ip=%s" % (vm.name, vm.public_ip))
+        return ssh_client
+
+    def setup_http_daemon(self, vm):
+        """ Creates a index.html in /tmp with private ip as content and
+            starts httpd daemon on all interfaces port 80 serving /tmp/
+            (only tested on the busybox based tiny vm)
+            vm requires attributes public_ip, public_port, username, password
+        """
+        commands = [
+            # using ip address instead of hostname
+            "/sbin/ip addr show eth0 |grep 'inet '| cut -f6 -d' ' > /tmp/index.html",
+            "/usr/sbin/httpd -v -p 0.0.0.0:80 -h /tmp/"
+        ]
+        try:
+            ssh_client = self.get_ssh_client(vm, 8)
+            for cmd in commands:
+                ssh_client.execute(cmd)
+        except Exception as e:
+            self.fail("Failed to ssh into vm: %s due to %s" % (vm, e))
+
+    def run_ssh_test_accross_hosts(self, clienthost, lb_address, max_requests=30):
+        """ Uses clienthost to run wget against port 80 on the hosts, expecting a unique output per URL.
+            Returns a list of outputs to evaluate.
+        """
+        # Setup ssh connection
+        ssh_client = self.get_ssh_client(clienthost, 8)
+        self.logger.debug(ssh_client)
+        results = []
+
+        try:
+            for x in range(0, max_requests):
+                cmd_test_http = "/usr/bin/wget -T2 -qO- http://" + \
+                    lb_address + "/ 2>/dev/null"
+                results.append(ssh_client.execute(cmd_test_http)[0])
+                self.logger.debug(results)
+
+        except Exception as e:
+            self.fail("%s: SSH failed for VM with IP Address: %s" %
+                      (e, clienthost.public_ip))
+
+        return results
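+    # With three app vms behind the LB, a balanced run of 30 requests would
+    # return each backend's address (as written by setup_http_daemon, e.g.
+    # "10.1.2.4/24") ten times; the values here are illustrative.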
+
+    def get_std_deviation(self, data):
+        """ Calculates and returns the mean, variance and standard deviation of an input list of values """
+        num_val = len(data)
+        mean = sum(data) / float(num_val)
+        squared_diffs = map(lambda x: math.pow(abs(x - mean), 2), data)
+        # sample variance (Bessel's correction)
+        variance = sum(squared_diffs) / (num_val - 1)
+        stddev = math.sqrt(variance)
+        return (mean, variance, stddev)
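+    # Worked example (illustrative): for response counts [12, 10, 8] this
+    # gives mean = 10.0, squared differences [4.0, 0.0, 4.0], sample
+    # variance 8.0 / 2 = 4.0 and stddev = 2.0.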
+
+    def evaluate_http_responses(self, responses, algorithm):
+        """ Evaluates response values from http test and verifies algorithm used"""
+        if algorithm == 'roundrobin':
+            # get a list of unique values
+            unique_values = set(responses)
+            # count the occurrence of each value in the responses
+            dataset = [responses.count(value) for value in unique_values]
+
+            if len(set(dataset)) == 1:
+                # all values in dataset are equal: a perfect distribution
+                self.logger.debug(
+                    "HTTP responses are evenly distributed! SUCCESS!")
+                return True
+            else:
+                # calculate mean, var, stddev on dataset
+                mean, variance, stddev = self.get_std_deviation(dataset)
+                for value in dataset:
+                    # determine how far this value is from the mean
+                    difference = abs(value - mean)
+                    # the distance between a host's response count and the mean
+                    # should be less than the standard deviation
+                    self.assertLess(
+                        difference, stddev, "Internal LB RoundRobin test failed because http responses are not evenly distributed")
+                    self.logger.debug(
+                        "Response distribution count: %d difference to mean: %.2f within standard deviation: %.2f" % (value, difference, stddev))
+                return True
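+    # Illustrative outcome: 30 requests spread as [10, 10, 10] over three
+    # backends hit the perfect-distribution branch above, while a spread like
+    # [12, 10, 8] goes through the stddev check instead.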
+
+    @attr(tags=["smoke", "advanced"], required_hardware="true")
+    def test_01_internallb_roundrobin_1VPC_3VM_HTTP_port80(self):
+        """Test create, assign, remove of an Internal LB with roundrobin http traffic to 3 vm's
+        """
+        max_http_requests = 30
+        algorithm = "roundrobin"
+        public_lb_port = 80
+        private_http_port = 80
+        public_ssh_start_port = 2000
+        num_app_vms = 3
+
+        self.logger.debug("Starting test_01_internallb_roundrobin_1VPC_3VM_HTTP_port80")
+        # Create and enable network offerings
+        network_offering_guestnet = self.create_and_enable_network_serviceoffering(
+            self.services["network_offering"])
+        network_offering_intlb = self.create_and_enable_network_serviceoffering(
+            self.services["network_offering_internal_lb"])
+
+        # Create VPC
+        vpc = self.create_vpc("vpc_intlb_test01", "10.1.0.0/16")
+
+        # Create network tiers
+        network_guestnet = self.create_network_tier(
+            "guestnet_test01", vpc.id, "10.1.1.1",  network_offering_guestnet)
+        network_internal_lb = self.create_network_tier(
+            "intlb_test01", vpc.id, "10.1.2.1",  network_offering_intlb)
+
+        # Create 1 lb client vm in guestnet network tier
+        client_vm = self.deployvm_in_network(vpc, network_guestnet.id)
+
+        # Create num_app_vms app vms in the internal lb network tier
+        app_vms = []
+        for x in range(0, num_app_vms):
+            vm = self.deployvm_in_network(vpc, network_internal_lb.id)
+            app_vms.append(vm)
+
+        # Acquire public ips to access the client and app vms
+        guestnet_public_ip = self.acquire_publicip(vpc, network_guestnet)
+        intlb_public_ip = self.acquire_publicip(vpc, network_internal_lb)
+
+        # Create nat rule to access client vm
+        self.create_natrule(vpc, client_vm, public_ssh_start_port, 22, guestnet_public_ip, network_guestnet)
+
+        # Create nat rules to access the app vms directly and start an http
+        # daemon on each of them
+        public_port = public_ssh_start_port + 1
+        for vm in app_vms:
+            self.create_natrule(vpc, vm, public_port, 22, intlb_public_ip, network_internal_lb)
+            public_port += 1
+            time.sleep(10)
+            # start http daemon on vm's
+            self.setup_http_daemon(vm)
+
+        # Create an internal loadbalancer in the internal lb network tier
+        applb = self.create_internal_loadbalancer(private_http_port, public_lb_port, algorithm, network_internal_lb.id)
+        # wait for the loadbalancer to boot and be configured
+        time.sleep(10)
+        # Assign the app VMs to the Internal Load Balancer
+        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
+        try:
+            applb.assign(self.apiclient, vms=app_vms)
+        except Exception as e:
+            self.fail(
+                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)
+
+        time.sleep(120)
+        results = self.run_ssh_test_across_hosts(
+            client_vm, applb.sourceipaddress, max_http_requests)
+        success = self.evaluate_http_responses(results, algorithm)
+        self.assertTrue(success, "Test failed on algorithm: %s" % algorithm)
+
+        self.logger.debug(
+            "Removing virtual machines and networks for test_01_internallb_roundrobin_2VM_port80")
+
+        # Remove the virtual machines from the Internal LoadBalancer
+        self.logger.debug("Remove virtual machines from LB: %s" % applb.id)
+        applb.remove(self.apiclient, vms=app_vms)
+
+        # Remove the Load Balancer
+        self.logger.debug("Deleting LB: %s" % applb.id)
         applb.delete(self.apiclient)
 
+    def get_lb_stats_settings(self):
+        self.logger.debug("Retrieving haproxy stats settings")
+        settings = {}
+        try:
+            settings["stats_port"] = Configurations.list(
+                self.apiclient, name="network.loadbalancer.haproxy.stats.port")[0].value
+            settings["stats_uri"] = Configurations.list(
+                self.apiclient, name="network.loadbalancer.haproxy.stats.uri")[0].value
+            settings["username"], settings["password"] = Configurations.list(
+                self.apiclient, name="network.loadbalancer.haproxy.stats.auth")[0].value.split(":")
+            settings["visibility"] = Configurations.list(
+                self.apiclient, name="network.loadbalancer.haproxy.stats.visibility")[0].value
+            self.logger.debug(settings)
+        except Exception as e:
+            self.fail("Failed to retrieve stats settings " % e)
+
+        return settings
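+    # For reference, with stock global settings this typically returns the
+    # CloudStack defaults (illustrative, not asserted by the test):
+    # {"stats_port": "8081", "stats_uri": "/admin?stats",
+    #  "username": "admin1", "password": "AdMiN123", "visibility": "global"}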
+
+    def verify_lb_stats(self, stats_ip, ssh_client, settings):
+
+        word_to_verify = "uptime"
+
+        url = "http://" + stats_ip + ":" + \
+            settings["stats_port"] + settings["stats_uri"]
+        get_contents = "/usr/bin/wget -T3 -qO- --user=" + \
+            settings["username"] + " --password=" + \
+            settings["password"] + " " + url
+        try:
+            self.logger.debug(
+                "Trying to connect to the haproxy stats url %s" % url)
+            result = ssh_client.execute(get_contents)
+        except Exception as e:
+            self.fail("Failed to verify admin stats url %s from: %s" %
+                      (url, ssh_client))
+        finally:
+            del ssh_client
+
+        return any(word_to_verify in word for word in result)
+
+    @attr(tags=["smoke", "advanced"], required_hardware="true")
+    def test_02_internallb_haproxy_stats_on_all_interfaces(self):
+        """ Test to verify access to the loadbalancer haproxy admin stats page
+            when global setting network.loadbalancer.haproxy.stats.visibility is set to 'global'
+            with credentials from global setting network.loadbalancer.haproxy.stats.auth
+            using the uri from global setting network.loadbalancer.haproxy.stats.uri"""
+
+        self.logger.debug(
+            "Starting test_02_internallb_haproxy_stats_on_all_interfaces")
+
+        settings = self.get_lb_stats_settings()
+
+        dummy_port = 90
+        network_gw = "10.1.2.1"
+        default_visibility = "global"
+
+        # Update global setting if it is not set to our test default
+        if settings["visibility"] != default_visibility:
+            config_update = Configurations.update(
+                self.apiclient, "network.loadbalancer.haproxy.stats.visibility", default_visibility)
+            self.logger.debug(
+                "Updated global setting stats haproxy.stats.visibility to %s" % (default_visibility))
+            settings = self.get_lb_stats_settings()
+
+        # Create and enable network offering
+        network_offering_intlb = self.create_and_enable_network_serviceoffering(
+            self.services["network_offering_internal_lb"])
+
+        # Create VPC
+        vpc = self.create_vpc("vpc_intlb_test_02", "10.1.0.0/16")
+
+        # Create network tier with internal lb service enabled
+        network_internal_lb = self.create_network_tier(
+            "intlb_test02", vpc.id, network_gw,  network_offering_intlb)
+
+        # Create 1 lb vm in internal lb network tier
+        vm = self.deployvm_in_network(vpc, network_internal_lb.id)
+
+        # Acquire a public ip and attach it to the internal lb network tier
+        public_ip = self.acquire_publicip(vpc, network_internal_lb)
+
+        # Create an internal loadbalancer in the internal lb network tier
+        applb = self.create_internal_loadbalancer(
+            dummy_port, dummy_port, "leastconn", network_internal_lb.id)
+
+        # Assign the VM to the Internal Load Balancer
+        self.logger.debug("Assigning virtual machines to LB: %s" % applb.id)
+        try:
+            applb.assign(self.apiclient, vms=[vm])
+        except Exception as e:
+            self.fail(
+                "Failed to assign virtual machine(s) to loadbalancer: %s" % e)
+
+        # Create nat rule to access the vm over ssh
+        self.create_natrule(
+            vpc, vm, "22", "22", public_ip, network_internal_lb)
+
+        # Verify access to and the contents of the admin stats page on the
+        # private address via a vm in the internal lb tier
+        stats = self.verify_lb_stats(
+            applb.sourceipaddress, self.get_ssh_client(vm, 4), settings)
+        self.assertTrue(stats, "Failed to verify LB HAProxy stats")
+
     @classmethod
     def tearDownClass(cls):
         try:
+            cls.logger.debug("Cleaning up testcase resources")
             cleanup_resources(cls.apiclient, cls.cleanup)
-        except Exception, e:
-            raise Exception("Cleanup failed with %s" % e)
 
+        except Exception as e:
+            raise Exception("Cleanup failed with %s" % e)
diff --git a/test/integration/smoke/test_privategw_acl.py b/test/integration/smoke/test_privategw_acl.py
index cf0f8e3..e1dabc4 100644
--- a/test/integration/smoke/test_privategw_acl.py
+++ b/test/integration/smoke/test_privategw_acl.py
@@ -24,88 +24,239 @@
 from marvin.lib.common import *
 from nose.plugins.attrib import attr
 
+import logging
+
+class Services:
+    """Test VPC network services - Port Forwarding Rules Test Data Class.
+    """
+
+    def __init__(self):
+        self.services = {
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "host1": None,
+            "service_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+            },
+            "network_offering": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network off',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "network_offering_no_lb": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network off',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "vpc_offering": {
+                "name": "VPC off",
+                "displaytext": "VPC off",
+                "supportedservices":
+                    "Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL"
+            },
+            "vpc": {
+                "name": "TestVPC",
+                "displaytext": "TestVPC",
+                "cidr": '10.0.0.1/24'
+            },
+            "network": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+                "netmask": '255.255.255.0'
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "ostype": 'CentOS 5.3 (64-bit)',
+            "timeout": 10,
+        }
 
 class TestPrivateGwACL(cloudstackTestCase):
-    def setUp(self):
-        self.apiClient = self.testClient.getApiClient()
-        self.networkOfferingId = 11
-        self.networkId = None
-        self.vmId = None
-        self.vpcId = None
-        self.aclId = None
-        self.zoneId = 1
-        self.serviceOfferingId = 1
-        self.templateId = 5
-        self.privateGwId = None
 
+    @classmethod
+    def setUpClass(cls):
+
+        cls.testClient = super(TestPrivateGwACL, cls).getClsTestClient()
+        cls.api_client = cls.testClient.getApiClient()
+
+        cls.services = Services().services
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.api_client)
+        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
+        cls.template = get_template(
+            cls.api_client,
+            cls.zone.id,
+            cls.services["ostype"])
+        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+        cls.services["virtual_machine"]["template"] = cls.template.id
+
+        cls.service_offering = ServiceOffering.create(
+            cls.api_client,
+            cls.services["service_offering"])
+        cls._cleanup = [cls.service_offering]
+
+        cls.logger = logging.getLogger('TestPrivateGwACL')
+        cls.stream_handler = logging.StreamHandler()
+        cls.logger.setLevel(logging.DEBUG)
+        cls.logger.addHandler(cls.stream_handler)
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.api_client, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+
+        self.logger.debug("Creating Admin Account for Domain ID ==> %s" %self.domain.id)
+        self.account = Account.create(
+            self.apiclient,
+            self.services["account"],
+            admin=True,
+            domainid=self.domain.id)
+
+        self.logger.debug("Creating a VPC offering..")
+        self.vpc_off = VpcOffering.create(
+            self.apiclient,
+            self.services["vpc_offering"])
+
+        self.logger.debug("Enabling the VPC offering created")
+        self.vpc_off.update(self.apiclient, state='Enabled')
+
+        self.logger.debug("Creating a VPC network in the account: %s" % self.account.name)
+        self.services["vpc"]["cidr"] = '10.1.1.1/16'
+        self.vpc = VPC.create(
+            self.apiclient,
+            self.services["vpc"],
+            vpcofferingid=self.vpc_off.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid)
+
+        self.cleanup = [self.vpc, self.vpc_off, self.account]
+        return
+
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
 
     @attr(tags=["advanced"], required_hardware="false")
     def test_privategw_acl(self):
-        #TODO: SIMENH: add a new test to verification of ACL rules
-
-        # 1) Create VPC
-        self.createVPC()
-
-        # 2) Create ACl
         self.createACL()
-
-        # 3) Create ACl Item
         self.createACLItem()
-
-        # 4) Create network with ACL
         self.createNetwork()
-
-        # 5) create private gw
         self.createPvtGw()
-
-        # 6) update acl id
         self.replaceacl()
 
     def createACL(self):
         createAclCmd = createNetworkACLList.createNetworkACLListCmd()
         createAclCmd.name = "acl1"
         createAclCmd.description = "new acl"
-        createAclCmd.vpcid = self.vpcId
-        createAclResponse = self.apiClient.createNetworkACLList(createAclCmd)
+        createAclCmd.vpcid = self.vpc.id
+        createAclResponse = self.apiclient.createNetworkACLList(createAclCmd)
+
         self.aclId = createAclResponse.id
 
+        self.assertIsNotNone(self.aclId, "Failed to create ACL.")
+
     def createACLItem(self):
         createAclItemCmd = createNetworkACL.createNetworkACLCmd()
         createAclItemCmd.cidr = "0.0.0.0/0"
         createAclItemCmd.protocol = "TCP"
-        createAclItemCmd.number = "10"
+        createAclItemCmd.number = "1"
         createAclItemCmd.action = "Deny"
         createAclItemCmd.aclid = self.aclId
-        createAclItemResponse = self.apiClient.createNetworkACL(createAclItemCmd)
-        self.assertIsNotNone(createAclItemResponse.id, "Network failed to aclItem")
+        createAclItemResponse = self.apiclient.createNetworkACL(createAclItemCmd)
 
-    def createVPC(self):
-        createVPCCmd = createVPC.createVPCCmd()
-        createVPCCmd.name = "new vpc"
-        createVPCCmd.cidr = "10.1.1.0/24"
-        createVPCCmd.displaytext = "new vpc"
-        createVPCCmd.vpcofferingid = 1
-        createVPCCmd.zoneid = self.zoneId
-        createVPCResponse = self.apiClient.createVPC(createVPCCmd)
-        self.vpcId = createVPCResponse.id
-
+        self.assertIsNotNone(createAclItemResponse.id, "Failed to create ACL item.")
 
     def createNetwork(self):
-        createNetworkCmd = createNetwork.createNetworkCmd()
-        createNetworkCmd.name = "vpc network"
-        createNetworkCmd.displaytext = "vpc network"
-        createNetworkCmd.netmask = "255.255.255.0"
-        createNetworkCmd.gateway = "10.1.1.1"
-        createNetworkCmd.zoneid = self.zoneId
-        createNetworkCmd.vpcid = self.vpcId
-        createNetworkCmd.networkofferingid = self.networkOfferingId
-        createNetworkCmd.aclid = self.aclId
-        createNetworkResponse = self.apiClient.createNetwork(createNetworkCmd)
+        try:
+            self.logger.debug('Create NetworkOffering')
+            net_offering = self.services["network_offering"]
+            net_offering["name"] = "NET_OFF-10.1.1.1"
+            nw_off = NetworkOffering.create(
+                self.apiclient,
+                net_offering,
+                conservemode=False)
 
-        self.assertIsNotNone(createNetworkResponse.id, "Network failed to create")
-        self.networkId = createNetworkResponse.id
+            nw_off.update(self.apiclient, state='Enabled')
 
+            self.logger.debug('Created and Enabled NetworkOffering')
 
+            self.services["network"]["name"] = "NETWORK-10.1.1.1"
+
+            self.logger.debug('Adding Network=%s' % self.services["network"])
+            obj_network = Network.create(
+                self.apiclient,
+                self.services["network"],
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                networkofferingid=nw_off.id,
+                zoneid=self.zone.id,
+                gateway="10.1.1.1",
+                vpcid=self.vpc.id
+            )
+
+            self.logger.debug("Created network with ID: %s" % obj_network.id)
+        except Exception as e:
+            self.fail('Unable to create a Network with offering=%s because of %s' % (net_offering, e))
+
+        self.network = obj_network
+
+        self.cleanup.insert(0, nw_off)
+        self.cleanup.insert(0, obj_network)
 
     def createPvtGw(self):
         createPrivateGatewayCmd = createPrivateGateway.createPrivateGatewayCmd()
@@ -114,18 +265,23 @@
         createPrivateGatewayCmd.netmask = "255.255.255.0"
         createPrivateGatewayCmd.ipaddress = "10.147.30.200"
         createPrivateGatewayCmd.vlan = "30"
-        createPrivateGatewayCmd.vpcid = self.vpcId
+        createPrivateGatewayCmd.vpcid = self.vpc.id
         createPrivateGatewayCmd.sourcenatsupported = "true"
         createPrivateGatewayCmd.aclid = self.aclId
-        privateGatewayResponse =  self.apiClient.createPrivateGateway(createPrivateGatewayCmd)
+
+        try:
+            privateGatewayResponse = self.apiclient.createPrivateGateway(createPrivateGatewayCmd)
+        except Exception as e:
+            self.fail("Failed to create Private Gateway ==> %s" % e)
+
         self.privateGwId = privateGatewayResponse.id
 
+        self.assertIsNotNone(self.privateGwId, "Failed to create Private Gateway.")
+
     def replaceacl(self):
         replaceNetworkACLListCmd = replaceNetworkACLList.replaceNetworkACLListCmd()
         replaceNetworkACLListCmd.aclid = self.aclId
         replaceNetworkACLListCmd.gatewayid = self.privateGwId
-        successResponse = self.apiClient.replaceNetworkACLList(replaceNetworkACLListCmd);
+        successResponse = self.apiclient.replaceNetworkACLList(replaceNetworkACLListCmd)
 
-    def tearDown(self):
-        #destroy the vm
-        return
+        self.assertTrue(successResponse.success, "Failed to replace ACL list.")
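+        # replaceNetworkACLList points the private gateway at the ACL list
+        # created earlier, so the deny item from createACLItem now governs
+        # traffic through the gateway.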
diff --git a/test/integration/smoke/test_ssvm.py b/test/integration/smoke/test_ssvm.py
index fc6abf7..9224893 100644
--- a/test/integration/smoke/test_ssvm.py
+++ b/test/integration/smoke/test_ssvm.py
@@ -47,6 +47,13 @@
         self.cleanup = []
         self.services = self.testClient.getParsedTestDataConfig()
         self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
+
+        # The default sleep interval is 90 seconds, which is too coarse when the
+        # SSVM takes up to 2 minutes to start: a second pass through the sleep
+        # loop would waste test time.
+        self.services["sleep"] = 30
+        # The default delay is 120 seconds; 60 is enough here.
+        self.services["configurableData"]["systemVmDelay"] = 60
+
         return
 
     def tearDown(self):
@@ -470,6 +477,47 @@
             1,
             "Check cloud service is running or not"
         )
+
+        linklocal_ip = None
+        # Verify that the cached link local ip matches the current one
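+        # The shell pipeline used below reads the cached boot arguments from
+        # /var/cache/cloud/cmdline, splits them one per line and prints the
+        # value of the eth0ip= key; e.g. a cmdline containing
+        # "eth0ip=169.254.3.13" yields "169.254.3.13" (illustrative value).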
+        if self.hypervisor.lower() in ('vmware', 'hyperv'):
+            # SSH into SSVMs is done via management server for Vmware and
+            # Hyper-V
+            linklocal_ip = ssvm.privateip
+            result = get_process_status(
+                self.apiclient.connection.mgtSvr,
+                22,
+                self.apiclient.connection.user,
+                self.apiclient.connection.passwd,
+                ssvm.privateip,
+                "cat /var/cache/cloud/cmdline | xargs | sed \"s/ /\\n/g\" | grep eth0ip= | sed \"s/\=/ /g\" | awk '{print $2}'",
+                hypervisor=self.hypervisor
+            )
+        else:
+            try:
+                linklocal_ip = ssvm.linklocalip
+                host.user, host.passwd = get_host_credentials(
+                    self.config, host.ipaddress)
+                result = get_process_status(
+                    host.ipaddress,
+                    22,
+                    host.user,
+                    host.passwd,
+                    ssvm.linklocalip,
+                    "cat /var/cache/cloud/cmdline | xargs | sed \"s/ /\\n/g\" | grep eth0ip= | sed \"s/\=/ /g\" | awk '{print $2}'"
+                )
+            except KeyError:
+                self.skipTest(
+                    "Marvin configuration has no host "
+                    "credentials to check router services")
+        res = result[0]
+        self.debug("Cached Link Local IP: %s" % res)
+        self.assertEqual(
+            linklocal_ip,
+            res,
+            "The cached Link Local should be the same as the current Link Local IP, but they are different! Current ==> %s; Cached ==> %s " % (linklocal_ip, res)
+        )
+
         return
 
     @attr(
@@ -564,6 +612,47 @@
             1,
             "Check cloud service is running or not"
         )
+
+        linklocal_ip = None
+        # Verify that the cached link local ip matches the current one
+        if self.hypervisor.lower() in ('vmware', 'hyperv'):
+            # SSH into SSVMs is done via management server for Vmware and
+            # Hyper-V
+            linklocal_ip = cpvm.privateip
+            result = get_process_status(
+                self.apiclient.connection.mgtSvr,
+                22,
+                self.apiclient.connection.user,
+                self.apiclient.connection.passwd,
+                cpvm.privateip,
+                "cat /var/cache/cloud/cmdline | xargs | sed \"s/ /\\n/g\" | grep eth0ip= | sed \"s/\=/ /g\" | awk '{print $2}'",
+                hypervisor=self.hypervisor
+            )
+        else:
+            try:
+                linklocal_ip = cpvm.linklocalip
+                host.user, host.passwd = get_host_credentials(
+                    self.config, host.ipaddress)
+                result = get_process_status(
+                    host.ipaddress,
+                    22,
+                    host.user,
+                    host.passwd,
+                    cpvm.linklocalip,
+                    "cat /var/cache/cloud/cmdline | xargs | sed \"s/ /\\n/g\" | grep eth0ip= | sed \"s/\=/ /g\" | awk '{print $2}'"
+                )
+            except KeyError:
+                self.skipTest(
+                    "Marvin configuration has no host "
+                    "credentials to check router services")
+        res = result[0]
+        self.debug("Cached Link Local IP: %s" % res)
+        self.assertEqual(
+            linklocal_ip,
+            res,
+            "The cached Link Local should be the same as the current Link Local IP, but they are different! Current ==> %s; Cached ==> %s " % (linklocal_ip, res)
+        )
+
         return
 
     @attr(
@@ -817,7 +906,7 @@
             old_public_ip,
             "Check Public IP after reboot with that of before reboot"
         )
-        
+
         # Private IP Address of System VMs are allowed to change after reboot - CLOUDSTACK-7745
 
         # Wait for the agent to be up
diff --git a/test/integration/smoke/test_vpc_vpn.py b/test/integration/smoke/test_vpc_vpn.py
index 0b78ad1..5a322d3 100644
--- a/test/integration/smoke/test_vpc_vpn.py
+++ b/test/integration/smoke/test_vpc_vpn.py
@@ -16,150 +16,474 @@
 # under the License.
 """ Tests for VPN in VPC
 """
-#Import Local Modules
-from marvin.codes import FAILED
-from marvin.cloudstackTestCase import *
-from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
-from marvin.lib.base import *
-from marvin.lib.common import *
+# Import Local Modules
+from marvin.codes import PASS, FAILED
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import (cleanup_resources,
+                              get_process_status)
+
+from marvin.lib.base import (Domain,
+                             Account,
+                             Configurations,
+                             VPC,
+                             VpcOffering,
+                             ServiceOffering,
+                             NetworkOffering,
+                             Network,
+                             PublicIPAddress,
+                             NATRule,
+                             NetworkACL,
+                             NetworkACLList,
+                             LoadBalancerRule,
+                             ApplicationLoadBalancer,
+                             VirtualMachine,
+                             Template,
+                             FireWallRule,
+                             StaticNATRule,
+                             Vpn,
+                             VpnCustomerGateway,
+                             VpnUser
+                             )
+
+from marvin.sshClient import SshClient
+
+
+from marvin.lib.common import (get_zone,
+                               get_domain,
+                               get_template,
+                               list_network_offerings)
+
 from nose.plugins.attrib import attr
 
+import logging
 import time
 
+class Services:
+
+    """Test VPC VPN Services.
+    """
+
+    def __init__(self):
+        self.services = {
+            "account": {
+                "email": "test@test.com",
+                "firstname": "Test",
+                "lastname": "User",
+                "username": "test",
+                # Random characters are appended for unique
+                # username
+                "password": "password",
+            },
+            "host1": None,
+            "host2": None,
+            "default_hypervisor": "kvm",
+            "compute_offering": {
+                "name": "Tiny Instance",
+                "displaytext": "Tiny Instance",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+            },
+            "network_offering": {
+                "name": 'VPC Network offering',
+                "displaytext": 'VPC Network',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,Lb,PortForwarding,UserData,StaticNat,NetworkACL',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+            },
+            "network_offering_internal_lb": {
+                "name": 'VPC Network Internal Lb offering',
+                "displaytext": 'VPC Network internal lb',
+                "guestiptype": 'Isolated',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL,Lb',
+                "traffictype": 'GUEST',
+                "availability": 'Optional',
+                "useVpc": 'on',
+                "serviceCapabilityList": {
+                    "Lb": {
+                        "SupportedLbIsolation": 'dedicated',
+                        "lbSchemes": 'internal'
+                    }
+                },
+                "serviceProviderList": {
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter',
+                    "Lb": 'InternalLbVm'
+                },
+                "egress_policy": "true",
+            },
+            "vpc_offering": {
+                "name": 'Redundant VPC off',
+                "displaytext": 'Redundant VPC off',
+                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
+                "serviceProviderList": {
+                    "Vpn": 'VpcVirtualRouter',
+                    "Dhcp": 'VpcVirtualRouter',
+                    "Dns": 'VpcVirtualRouter',
+                    "SourceNat": 'VpcVirtualRouter',
+                    "PortForwarding": 'VpcVirtualRouter',
+                    "Lb": 'VpcVirtualRouter',
+                    "UserData": 'VpcVirtualRouter',
+                    "StaticNat": 'VpcVirtualRouter',
+                    "NetworkACL": 'VpcVirtualRouter'
+                },
+                "serviceCapabilityList": {
+                    "SourceNat": {
+                        "RedundantRouter": 'true'
+                    }
+                },
+            },
+            "vpc": {
+                "name": "TestVPC",
+                "displaytext": "TestVPC",
+                "cidr": '10.1.0.0/16'
+            },
+            "vpc1": {
+                "name": "TestVPC",
+                "displaytext": "VPC1",
+                "cidr": '10.1.0.0/16'
+            },
+            "vpc2": {
+                "name": "TestVPC",
+                "displaytext": "VPC2",
+                "cidr": '10.2.0.0/16'
+            },
+            "network_1": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+                "netmask": '255.255.255.0',
+                "gateway": "10.1.1.1"
+            },
+            "network_2": {
+                "name": "Test Network",
+                "displaytext": "Test Network",
+                "netmask": '255.255.255.0',
+                "gateway": "10.2.1.1"
+            },
+            "vpn": {
+                "vpn_user":"root",
+                "vpn_pass":"Md1s#dc",
+                "vpn_pass_fail":"abc!123", # too short
+                "iprange":"10.2.2.1-10.2.2.10",
+                "fordisplay": "true"
+            },
+            "vpncustomergateway": {
+                "esppolicy":"3des-md5;modp1536",
+                "ikepolicy":"3des-md5;modp1536",
+                "ipsecpsk":"ipsecpsk"
+            },
+            "natrule": {
+                "protocol": "TCP",
+                "cidrlist": '0.0.0.0/0',
+            },
+            "http_rule": {
+                "privateport": 80,
+                "publicport": 80,
+                "startport": 80,
+                "endport": 80,
+                "cidrlist": '0.0.0.0/0',
+                "protocol": "TCP"
+            },
+            "virtual_machine": {
+                "displayname": "Test VM",
+                "username": "root",
+                "password": "password",
+                "ssh_port": 22,
+                "privateport": 22,
+                "publicport": 22,
+                "protocol": 'TCP',
+            },
+            "template_kvm": {
+                "name": "tiny-kvm",
+                "displaytext": "macchinina kvm",
+                "format": "qcow2",
+                "hypervisor": "kvm",
+                "ostype": "Other PV (64-bit)",
+                "url": "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-kvm.qcow2.bz2",
+                "requireshvm": "True",
+            },
+            "template_xen": {
+                "name": "tiny-xen",
+                "displaytext": "macchinina xen",
+                "format": "vhd",
+                "hypervisor": "xen",
+                "ostype": "Other (64-bit)",
+                "url": "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-xen.vhd.bz2",
+                "requireshvm": "True",
+            },
+        }
+
+
 class TestVpcRemoteAccessVpn(cloudstackTestCase):
 
     @classmethod
     def setUpClass(cls):
+
+        cls.logger = logging.getLogger('TestVPCRemoteAccessVPN')
+        cls.stream_handler = logging.StreamHandler()
+        cls.logger.setLevel(logging.DEBUG)
+        cls.logger.addHandler(cls.stream_handler)
+        cls.startTime = time.time()
+
         testClient = super(TestVpcRemoteAccessVpn, cls).getClsTestClient()
         cls.apiclient = testClient.getApiClient()
-        cls.services = testClient.getParsedTestDataConfig()
+        cls.services = Services().services
 
         cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
         cls.domain = get_domain(cls.apiclient)
-        cls.service_offering = ServiceOffering.create(
+        cls.compute_offering = ServiceOffering.create(
             cls.apiclient,
-            cls.services["service_offerings"]["tiny"]
+            cls.services["compute_offering"]
         )
         cls.account = Account.create(cls.apiclient, services=cls.services["account"])
-        cls.template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.services["ostype"]
-        )
-        if cls.template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
 
+        if cls.services["default_hypervisor"] == "kvm":
+            cls.template = Template.register(cls.apiclient, cls.services["template_kvm"], cls.zone.id, hypervisor=cls.services[
+                "template_kvm"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
+        else:
+            cls.template = Template.register(cls.apiclient, cls.services["template_xen"], cls.zone.id, hypervisor=cls.services[
+                "template_xen"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
+
+        if cls.template == FAILED:
+            assert False, "Template.register() failed to return a usable template"
+
+        cls.services["virtual_machine"]["hypervisor"] = cls.services["default_hypervisor"]
         cls.cleanup = [cls.account]
 
-   
+
     @attr(tags=["advanced"], required_hardware="false")
     def test_vpc_remote_access_vpn(self):
-        """Test VPN in VPC"""
+        """Test Remote Access VPN in VPC"""
 
         # 0) Get the default network offering for VPC
+        self.logger.debug("Retrieving default VPC offering")
         networkOffering = NetworkOffering.list(self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
         self.assert_(networkOffering is not None and len(networkOffering) > 0, "No VPC based network offering")
 
         # 1) Create VPC
         vpcOffering = VpcOffering.list(self.apiclient,isdefault=True)
         self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found")
-        vpc = VPC.create(
+
+        try:
+            vpc = VPC.create(
+                    apiclient=self.apiclient,
+                    services=self.services["vpc"],
+                    networkDomain="vpc.vpn",
+                    vpcofferingid=vpcOffering[0].id,
+                    zoneid=self.zone.id,
+                    account=self.account.name,
+                    domainid=self.domain.id
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assert_(vpc is not None, "VPC creation failed")
+            self.logger.debug("VPC %s created" %(vpc.id))
+
+        try:
+            # 2) Create network in VPC
+            ntwk = Network.create(
                 apiclient=self.apiclient,
-                services=self.services["vpc"],
-                networkDomain="vpc.vpn",
-                vpcofferingid=vpcOffering[0].id,
+                services=self.services["network_1"],
+                accountid=self.account.name,
+                domainid=self.domain.id,
+                networkofferingid=networkOffering[0].id,
                 zoneid=self.zone.id,
-                account=self.account.name,
-                domainid=self.domain.id
-        )
-        self.assert_(vpc is not None, "VPC creation failed")
-        self.debug("VPC %s created" %(vpc.id))
+                vpcid=vpc.id
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(ntwk, "Network failed to create")
+            self.logger.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id))
 
-        # 2) Create network in VPC
-        ntwk = Network.create(
-            apiclient=self.apiclient,
-            services=self.services["ntwk"],
-            accountid=self.account.name,
-            domainid=self.domain.id,
-            networkofferingid=networkOffering[0].id,
-            zoneid=self.zone.id,
-            vpcid=vpc.id
-        )
-        self.assertIsNotNone(ntwk, "Network failed to create")
-        self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id))
+        try:
+            # 3) Deploy a vm
+            vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
+                templateid=self.template.id,
+                zoneid=self.zone.id,
+                accountid=self.account.name,
+                domainid= self.domain.id,
+                serviceofferingid=self.compute_offering.id,
+                networkids=ntwk.id,
+                hypervisor=self.services["virtual_machine"]["hypervisor"]
+            )
+            self.assert_(vm is not None, "VM failed to deploy")
+            self.assert_(vm.state == 'Running', "VM is not running")
+            self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id))
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.logger.debug("Deployed virtual machine: OK")
 
-        # 3) Deploy a vm
-        vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
-            templateid=self.template.id,
-            zoneid=self.zone.id,
-            accountid=self.account.name,
-            domainid= self.domain.id,
-            serviceofferingid=self.service_offering.id,
-            networkids=ntwk.id
-        )
-        self.assert_(vm is not None, "VM failed to deploy")
-        self.assert_(vm.state == 'Running', "VM is not running")
-        self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id))
+        try:
+            # 4) Enable VPN for VPC
+            src_nat_list = PublicIPAddress.list(
+                                            self.apiclient,
+                                            account=self.account.name,
+                                            domainid=self.account.domainid,
+                                            listall=True,
+                                            issourcenat=True,
+                                            vpcid=vpc.id
+                                            )
+            ip = src_nat_list[0]
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.logger.debug("Acquired public ip address: OK")
 
-        # 4) Enable VPN for VPC
+        try:
+            vpn = Vpn.create(self.apiclient,
+                             publicipid=ip.id,
+                             account=self.account.name,
+                             domainid=self.account.domainid,
+                             iprange=self.services["vpn"]["iprange"],
+                             fordisplay=self.services["vpn"]["fordisplay"]
+                             )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(vpn, "Failed to create Remote Access VPN")
+            self.logger.debug("Created Remote Access VPN: OK")
 
-        src_nat_list = PublicIPAddress.list(
-                                        self.apiclient,
-                                        account=self.account.name,
-                                        domainid=self.account.domainid,
-                                        listall=True,
-                                        issourcenat=True,
-                                        vpcid=vpc.id
-                                        )
-        ip = src_nat_list[0]
-        vpn = Vpn.create(self.apiclient,
-                         publicipid=ip.id,
-                         account=self.account.name,
-                         domainid=self.account.domainid)
-
+        vpnUser = None
         # 5) Add VPN user for VPC
-        vpnUser = VpnUser.create(self.apiclient,
-                                 account=self.account.name,
-                                 domainid=self.account.domainid,
-                                 username=self.services["vpn_user"]["username"],
-                                 password=self.services["vpn_user"]["password"])
+        try:
+            vpnUser = VpnUser.create(self.apiclient,
+                                     account=self.account.name,
+                                     domainid=self.account.domainid,
+                                     username=self.services["vpn"]["vpn_user"],
+                                     password=self.services["vpn"]["vpn_pass"]
+                                     )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(vpnUser, "Failed to create Remote Access VPN User")
+            self.logger.debug("Created VPN User: OK")
 
-        # 6) Disable VPN for VPC
-        vpn.delete(self.apiclient)
+
+        #TODO: Add an actual remote vpn connection test from a remote vpc
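+        # A possible follow-up (sketch only, not implemented here): deploy a
+        # client vm outside this VPC, establish an L2TP/IPsec connection to
+        # ip.ipaddress using self.services["vpn"]["vpn_user"] and
+        # self.services["vpn"]["vpn_pass"], then assert that a tier-internal
+        # address is reachable over the tunnel.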
+
+        try:
+            # 6) Disable VPN for VPC
+            vpn.delete(self.apiclient)
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.logger.debug("Deleted the Remote Access VPN: OK")
+
 
     @classmethod
     def tearDownClass(cls):
+        total_time = time.time() - cls.startTime
+        cls.logger.debug("%.3f" % (total_time))
         try:
+            cls.logger.debug("Cleaning up resources")
             cleanup_resources(cls.apiclient, cls.cleanup)
         except Exception, e:
             raise Exception("Cleanup failed with %s" % e)
 
+
 class TestVpcSite2SiteVpn(cloudstackTestCase):
 
     @classmethod
     def setUpClass(cls):
+        cls.logger = logging.getLogger('TestVPCSite2SiteVPN')
+        cls.stream_handler = logging.StreamHandler()
+        cls.logger.setLevel(logging.DEBUG)
+        cls.logger.addHandler(cls.stream_handler)
+
         testClient = super(TestVpcSite2SiteVpn, cls).getClsTestClient()
         cls.apiclient = testClient.getApiClient()
-        cls.services = testClient.getParsedTestDataConfig()
+        cls.services = Services().services
 
         cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
         cls.domain = get_domain(cls.apiclient)
         cls.service_offering = ServiceOffering.create(
             cls.apiclient,
-            cls.services["service_offerings"]["tiny"]
+            cls.services["compute_offering"]
         )
-        cls.account = Account.create(cls.apiclient, services=cls.services["account"])
-        cls.template = get_template(
-            cls.apiclient,
-            cls.zone.id,
-            cls.services["ostype"]
-        )
-        if cls.template == FAILED:
-            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
 
+        cls.account = Account.create(cls.apiclient, services=cls.services["account"])
+        if cls.services["default_hypervisor"] == "kvm":
+            cls.template = Template.register(cls.apiclient, cls.services["template_kvm"], cls.zone.id, hypervisor=cls.services[
+                "template_kvm"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
+        else:
+            cls.template = Template.register(cls.apiclient, cls.services["template_xen"], cls.zone.id, hypervisor=cls.services[
+                "template_xen"]["hypervisor"], account=cls.account.name, domainid=cls.domain.id)
+
+        if cls.template == FAILED:
+            assert False, "Template.register() failed to return a usable template"
+
+        cls.services["virtual_machine"]["hypervisor"] = cls.services["default_hypervisor"]
         cls.cleanup = [cls.account]
 
+
+    def get_ssh_client(self, virtual_machine, services, retries):
+        """ Setup ssh client connection and return connection
+        vm requires attributes public_ip, public_port, username, password """
+
+        try:
+            ssh_client = SshClient(
+                virtual_machine.public_ip,
+                services["virtual_machine"]["ssh_port"],
+                services["virtual_machine"]["username"],
+                services["virtual_machine"]["password"],
+                retries)
+
+        except Exception as e:
+            self.fail("Unable to create ssh connection: " % e)
+
+        self.assertIsNotNone(
+            ssh_client, "Failed to setup ssh connection to vm=%s on public_ip=%s" % (virtual_machine.name, virtual_machine.public_ip))
+
+        return ssh_client
+
+    def create_natrule(self, vpc, vm, public_port, private_port, public_ip, network, services=None):
+        self.logger.debug("Creating NAT rule in network for vm with public IP")
+        if not services:
+            self.services["natrule"]["privateport"] = private_port
+            self.services["natrule"]["publicport"] = public_port
+            self.services["natrule"]["startport"] = public_port
+            self.services["natrule"]["endport"] = public_port
+            services = self.services["natrule"]
+
+        nat_rule = NATRule.create(
+            apiclient=self.apiclient,
+            services=services,
+            ipaddressid=public_ip.ipaddress.id,
+            virtual_machine=vm,
+            networkid=network.id
+        )
+        self.assertIsNotNone(
+            nat_rule, "Failed to create NAT Rule for %s" % public_ip.ipaddress.ipaddress)
+        self.logger.debug(
+            "Adding NetworkACL rules to make NAT rule accessible")
+
+        vm.ssh_ip = nat_rule.ipaddress
+        vm.public_ip = nat_rule.ipaddress
+        vm.public_port = int(public_port)
+        return nat_rule
+
+
     @attr(tags=["advanced"], required_hardware="false")
     def test_vpc_site2site_vpn(self):
         """Test VPN in VPC"""
@@ -168,99 +492,134 @@
         networkOffering = NetworkOffering.list(self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
         self.assert_(networkOffering is not None and len(networkOffering) > 0, "No VPC based network offering")
 
-        # 1) Create VPC
+        # 1) Get the default VPC offering
         vpcOffering = VpcOffering.list(self.apiclient,isdefault=True)
         self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found")
 
-        vpc1 = VPC.create(
+        # Create VPC 1
+        try:
+            vpc1 = VPC.create(
+                    apiclient=self.apiclient,
+                    services=self.services["vpc"],
+                    networkDomain="vpc1.vpn",
+                    vpcofferingid=vpcOffering[0].id,
+                    zoneid=self.zone.id,
+                    account=self.account.name,
+                    domainid=self.domain.id
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assert_(vpc1 is not None, "VPC1 creation failed")
+
+        self.logger.debug("VPC1 %s created" %(vpc1.id))
+
+        # Create VPC 2
+        try:
+            vpc2 = VPC.create(
+                    apiclient=self.apiclient,
+                    services=self.services["vpc2"],
+                    networkDomain="vpc2.vpn",
+                    vpcofferingid=vpcOffering[0].id,
+                    zoneid=self.zone.id,
+                    account=self.account.name,
+                    domainid=self.account.domainid
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assert_(vpc2 is not None, "VPC2 creation failed")
+
+        self.logger.debug("VPC2 %s created" %(vpc2.id))
+
+        default_acl = NetworkACLList.list(self.apiclient, name="default_allow")[0]
+
+        # Create network in VPC 1
+        try:
+            ntwk1 = Network.create(
                 apiclient=self.apiclient,
-                services=self.services["vpc"],
-                networkDomain="vpc1.vpn",
-                vpcofferingid=vpcOffering[0].id,
+                services=self.services["network_1"],
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                networkofferingid=networkOffering[0].id,
                 zoneid=self.zone.id,
-                account=self.account.name,
-                domainid=self.domain.id
-        )
-        self.assert_(vpc1 is not None, "VPC creation failed")
-        self.debug("VPC1 %s created" %(vpc1.id))
+                vpcid=vpc1.id,
+                aclid=default_acl.id
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(ntwk1, "Network failed to create")
 
-        vpc2 = VPC.create(
+        self.logger.debug("Network %s created in VPC %s" %(ntwk1.id, vpc1.id))
+
+        # Create network in VPC 2
+        try:
+            ntwk2 = Network.create(
                 apiclient=self.apiclient,
-                services=self.services["vpc2"],
-                networkDomain="vpc2.vpn",
-                vpcofferingid=vpcOffering[0].id,
+                services=self.services["network_2"],
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                networkofferingid=networkOffering[0].id,
                 zoneid=self.zone.id,
-                account=self.account.name,
-                domainid=self.domain.id
-        )
-        self.assert_(vpc2 is not None, "VPC2 creation failed")
-        self.debug("VPC2 %s created" %(vpc1.id))
+                vpcid=vpc2.id,
+                aclid=default_acl.id
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(ntwk2, "Network failed to create")
 
-        # 2) Create network in VPC
-        ntwk1 = Network.create(
-            apiclient=self.apiclient,
-            services=self.services["ntwk"],
-            accountid=self.account.name,
-            domainid=self.domain.id,
-            networkofferingid=networkOffering[0].id,
-            zoneid=self.zone.id,
-            vpcid=vpc1.id
-        )
-        self.assertIsNotNone(ntwk1, "Network failed to create")
-        self.debug("Network %s created in VPC %s" %(ntwk1.id, vpc1.id))
+        self.logger.debug("Network %s created in VPC %s" %(ntwk2.id, vpc2.id))
 
-        ntwk2 = Network.create(
-            apiclient=self.apiclient,
-            services=self.services["ntwk2"],
-            accountid=self.account.name,
-            domainid=self.domain.id,
-            networkofferingid=networkOffering[0].id,
-            zoneid=self.zone.id,
-            vpcid=vpc2.id
-        )
-        self.assertIsNotNone(ntwk2, "Network failed to create")
-        self.debug("Network %s created in VPC %s" %(ntwk2.id, vpc2.id))
+        # Deploy a vm in network 1
+        try:
+            vm1 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
+                templateid=self.template.id,
+                zoneid=self.zone.id,
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                serviceofferingid=self.service_offering.id,
+                networkids=ntwk1.id,
+                hypervisor=self.services["virtual_machine"]["hypervisor"]
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(vm1, "VM failed to deploy")
+            self.assertEqual(vm1.state, 'Running', "VM is not running")
 
-        # 3) Deploy a vm
-        vm1 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
-            templateid=self.template.id,
-            zoneid=self.zone.id,
-            accountid=self.account.name,
-            domainid= self.domain.id,
-            serviceofferingid=self.service_offering.id,
-            networkids=ntwk1.id
-        )
-        self.assert_(vm1 is not None, "VM failed to deploy")
-        self.assert_(vm1.state == 'Running', "VM is not running")
-        self.debug("VM %s deployed in VPC %s" %(vm1.id, vpc1.id))
+        self.logger.debug("VM %s deployed in VPC %s" %(vm1.id, vpc1.id))
 
-        vm2 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
-            templateid=self.template.id,
-            zoneid=self.zone.id,
-            accountid=self.account.name,
-            domainid= self.domain.id,
-            serviceofferingid=self.service_offering.id,
-            networkids=ntwk2.id
-        )
-        self.assert_(vm2 is not None, "VM failed to deploy")
-        self.assert_(vm2.state == 'Running', "VM is not running")
+        # Deploy a vm in network 2
+        try:
+            vm2 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
+                templateid=self.template.id,
+                zoneid=self.zone.id,
+                accountid=self.account.name,
+                domainid=self.account.domainid,
+                serviceofferingid=self.service_offering.id,
+                networkids=ntwk2.id,
+                hypervisor=self.services["virtual_machine"]["hypervisor"]
+            )
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(vm2, "VM failed to deploy")
+            self.assertEqual(vm2.state, 'Running', "VM is not running")
+
         self.debug("VM %s deployed in VPC %s" %(vm2.id, vpc2.id))
 
         # 4) Enable Site-to-Site VPN for VPC
-        cmd=createVpnGateway.createVpnGatewayCmd()
-        cmd.vpcid=vpc1.id
-        vpn1_response = self.apiclient.createVpnGateway(cmd)
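+        # the Vpn.createVpnGateway helper returns the API response as a dict, hence vpn1_response['id'] below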
+        vpn1_response = Vpn.createVpnGateway(self.apiclient, vpc1.id)
+        self.assertIsNotNone(vpn1_response, "Failed to enable VPN Gateway 1")
+        self.logger.debug("VPN gateway for VPC %s enabled" % vpc1.id)
 
-        self.debug("VPN gateway for VPC %s enabled" % (vpc1.id))
-
-        cmd=createVpnGateway.createVpnGatewayCmd()
-        cmd.vpcid=vpc2.id
-        vpn2_response = self.apiclient.createVpnGateway(cmd)
-
-        self.debug("VPN gateway for VPC %s enabled" %(vpc2.id))
+        vpn2_response = Vpn.createVpnGateway(self.apiclient, vpc2.id)
+        self.assertIsNotNone(vpn2_response, "Failed to enable VPN Gateway 2")
+        self.logger.debug("VPN gateway for VPC %s enabled" % vpc2.id)
 
         # 5) Add VPN Customer gateway info
-
         src_nat_list = PublicIPAddress.list(
                                         self.apiclient,
                                         account=self.account.name,
@@ -270,7 +629,6 @@
                                         vpcid=vpc1.id
                                         )
         ip1 = src_nat_list[0]
-
         src_nat_list = PublicIPAddress.list(
                                         self.apiclient,
                                         account=self.account.name,
@@ -281,40 +639,58 @@
                                         )
         ip2 = src_nat_list[0]
 
-        cmd=createVpnCustomerGateway.createVpnCustomerGatewayCmd()
-        cmd.esppolicy="3des-md5;modp1536"
-        cmd.ikepolicy="3des-md5;modp1536"
-        cmd.domainid=self.account.domainid
-        cmd.account=self.account.name
-        cmd.ipsecpsk="ipsecpsk"
+        services = self.services["vpncustomergateway"]
+        customer1_response = VpnCustomerGateway.create(self.apiclient, services, "Peer VPC1", ip1.ipaddress, vpc1.cidr, self.account.name, self.domain.id)
+        self.logger.debug("VPN customer gateway added for VPC %s" % vpc1.id)
+        self.logger.debug(vars(customer1_response))
 
-        cmd.name="Peer VPC1"
-        cmd.gateway=ip1.ipaddress
-        cmd.cidrlist=vpc1.cidr
-        customer1_response = self.apiclient.createVpnCustomerGateway(cmd)
-        self.debug("VPN customer gateway added for VPC %s enabled" %(vpc1.id))
+        customer2_response = VpnCustomerGateway.create(self.apiclient, services, "Peer VPC2", ip2.ipaddress, vpc2.cidr, self.account.name, self.domain.id)
+        self.logger.debug("VPN customer gateway added for VPC %s" % vpc2.id)
+        self.logger.debug(vars(customer2_response))
 
-        cmd.name="Peer VPC2"
-        cmd.gateway=ip2.ipaddress
-        cmd.cidrlist=vpc2.cidr
-        customer2_response = self.apiclient.createVpnCustomerGateway(cmd)
-        self.debug("VPN customer gateway added for VPC %s enabled" %(vpc2.id))
+        # 6) Connect two VPCs
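+        # one connection is created passive so that side only listens; the active peer initiates the tunnel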
+        vpnconn1_response = Vpn.createVpnConnection(self.apiclient, customer1_response.id, vpn2_response['id'], True)
+        self.debug("VPN passive connection created for VPC %s" % vpc2.id)
 
-        # 6) Connect two VPCs 
-        cmd = createVpnConnection.createVpnConnectionCmd()
-        cmd.s2svpngatewayid = vpn2_response.id
-        cmd.s2scustomergatewayid = customer1_response.id
-        cmd.passive="true"
-        vpnconn1_response = self.apiclient.createVpnConnection(cmd)
-        self.debug("VPN passive connection created for VPC %s" %(vpc2.id))
+        vpnconn2_response = Vpn.createVpnConnection(self.apiclient, customer2_response.id, vpn1_response['id'])
+        self.debug("VPN connection created for VPC %s" % vpc1.id)
 
-        cmd = createVpnConnection.createVpnConnectionCmd()
-        cmd.s2svpngatewayid = vpn1_response.id
-        cmd.s2scustomergatewayid = customer2_response.id
-        vpnconn2_response = self.apiclient.createVpnConnection(cmd)
-        self.debug("VPN connection created for VPC %s" %(vpc1.id))
+        self.assertEqual(vpnconn2_response['state'], "Connected", "Failed to connect between VPCs!")
 
-        self.assertEqual(vpnconn2_response.state, "Connected", "Failed to connect between VPCs!")
+        # acquire an extra public IP address to SSH into vm2
+        try:
+            vm2.public_ip = PublicIPAddress.create(
+                                    apiclient=self.apiclient,
+                                    accountid=self.account.name,
+                                    zoneid=self.zone.id,
+                                    domainid=self.account.domainid,
+                                    services=self.services,
+                                    networkid=ntwk2.id,
+                                    vpcid=vpc2.id)
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assertIsNotNone(vm2.public_ip, "Failed to acquire public IP for vm2")
+
+        # Create port forward to be able to ssh into vm2
+        try:
+            natrule = self.create_natrule(vpc2, vm2, 22, 22, vm2.public_ip, ntwk2)
+        except Exception as e:
+            self.fail(e)
+        finally:
+            self.assert_(natrule is not None, "Failed to create portforward for vm2")
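+            # short pause so the virtual router can apply the new port-forwarding rule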
+            time.sleep(10)
+
+        # setup ssh connection to vm2
+        ssh_client = self.get_ssh_client(vm2, self.services, 10)
+
+        if ssh_client:
+            # run ping test
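+            # the grep/cut pipeline reduces ping's summary line to the bare packet-loss percentage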
+            packet_loss = ssh_client.execute("/bin/ping -c 3 -t 10 " + vm1.nic[0].ipaddress + " |grep packet|cut -d ' ' -f 7| cut -f1 -d'%'")[0]
+            self.assertEqual(int(packet_loss), 0, "Ping did not succeed")
+        else:
+            self.fail("Failed to setup ssh connection to %s" %vm2.public_ip)
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml
index e309bcc..ba4fa3a 100644
--- a/tools/apidoc/pom.xml
+++ b/tools/apidoc/pom.xml
@@ -32,6 +32,12 @@
         <artifactId>cloud-server</artifactId>
         <version>${project.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.cloudstack</groupId>
+        <artifactId>cloud-client-ui</artifactId>
+        <version>${project.version}</version>
+        <type>pom</type>
+      </dependency>
     </dependencies>
 
     <build>
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index b607b68..b7d2781 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -780,7 +780,7 @@
         "name": "Centos",
         "passwordenabled": False,
         "ostype": "CentOS 6.3 (64-bit)",
-        "url": "http://10.147.28.7/templates/centos63.ova",
+        "url": "http://people.apache.org/~sanjeev/centos63.ova",
         "format": "OVA",
         "ispublic": "true"
     },
@@ -798,7 +798,7 @@
         "displaytext": "Windows 7 (64-bit)",
         "name": "Windows 7 (64-bit)",
         "passwordenabled": False,
-        "url": "http://10.147.28.7/templates/windows7.vhd",
+        "url": "http://people.apache.org/~sanjeev/windows7.vhd",
         "format": "VHD",
         "ostype": "Windows 7 (64-bit)",
         "ispublic": "true",
@@ -830,7 +830,7 @@
         "displaytext": "win2012",
         "name": "win2012",
         "passwordenabled": False,
-        "url": "http://nfs1.lab.vmops.com/templates/vmware/new-test-win.ova",
+        "url": "http://people.apache.org/~sanjeev/new-test-win.ova",
         "format": "OVA",
         "ostype": "Windows 8 (64-bit)",
     },
@@ -838,7 +838,7 @@
         "displaytext": "Rhel60",
         "name": "Rhel60",
         "passwordenabled": False,
-        "url": "http://10.147.28.7/templates/Rhel/Rhel6-64bit.ova",
+        "url": "http://people.apache.org/~sanjeev/Rhel6-64bit.ova",
         "format": "OVA",
         "ostype": "Red Hat Enterprise Linux 6.0 (64-bit)"
     },
@@ -846,7 +846,7 @@
         "displaytext": "xs",
         "name": "xs",
         "passwordenabled": False,
-        "url": "http://10.147.28.7/templates/ttylinux_pv.vhd.bz2",
+        "url": "http://people.apache.org/~sanjeev/ttylinux_pv.vhd.bz2",
         "format": "VHD"
     },
     "security_group": {"name": "custom_Sec_Grp"},
@@ -1058,7 +1058,7 @@
                               "displaytext": "RHEL7 (64-bit)",
                               "name": "RHEL 7 Insta1",
                               "passwordenabled": False,
-                              "url": "http://10.147.28.7/templates/Rhel/RHEL764bit.vhd",
+                              "url": "http://people.apache.org/~sanjeev/RHEL764bitwithtools.vhd",
                               "format": "VHD" ,
                               "ostype": "RHEL 7 (64-bit)",
                               "ispublic": "true",
@@ -1512,7 +1512,7 @@
                               "displaytext": "Windows 8 (64-bit)",
                               "name": "win8withpvxen",
                               "passwordenabled": False,
-                              "url": "http://10.147.28.7/templates/sailajaxd/XS65pvtemplates/win8/79211594-1d4a-4dee-ae6c-c5c315ded2be.vhd",
+                              "url": "http://people.apache.org/~sanjeev/79211594-1d4a-4dee-ae6c-c5c315ded2be.vhd",
                               "format": "VHD" ,
                               "ostype": "Windows 8 (64-bit)",
                               "ispublic": "true",
@@ -1541,17 +1541,17 @@
     "browser_upload_volume":{
           "VHD": {
         "diskname": "XenUploadVol",
-        "url": "http://10.147.28.7/templates/rajani-thin-volume.vhd",
+        "url": "http://people.apache.org/~sanjeev/rajani-thin-volume.vhd",
         "checksum": "09b08b6abb1b903fca7711d3ac8d6598",
                 },
           "OVA": {
         "diskname": "VMwareUploadVol",
-        "url": "http://10.147.28.7/templates/Autoscale_Template/CentOS5.5(64bit)-vmware-autoscale.ova",
+        "url": "http://people.apache.org/~sanjeev/CentOS5.5(64bit)-vmware-autoscale.ova",
         "checksum": "da997b697feaa2f1f6e0d4785b0cece2",
                 },
           "QCOW2": {
         "diskname": "KVMUploadVol",
-        "url": "http://10.147.28.7/templates/rajani-thin-volume.qcow2",
+        "url": "http://people.apache.org/~sanjeev/rajani-thin-volume.qcow2",
         "checksum": "02de0576dd3a61ab59c03fd795fc86ac",
                 },
     'browser_resized_disk_offering': {
@@ -1564,7 +1564,7 @@
           "VHD": {
         "templatename": "XenUploadtemplate",
         "displaytext": "XenUploadtemplate",
-        "url": "http://10.147.28.7/templates/builtin/centos56-x86_64.vhd.bz2",
+        "url": "http://people.apache.org/~sanjeev/centos56-x86_64.vhd.bz2",
         "hypervisor":"XenServer",
         "checksum": "09b08b6abb1b903fca7711d3ac8d6598",
         "ostypeid":"74affaea-c658-11e4-ad38-a6d1374244b4"
@@ -1572,7 +1572,7 @@
           "OVA": {
         "templatename": "VMwareUploadtemplate",
         "displaytext": "VMwareUploadtemplate",
-        "url": "http://nfs1.lab.vmops.com/templates/vmware/CentOS5.3-x86_64.ova",
+        "url": "http://people.apache.org/~sanjeev/CentOS5.3-x86_64.ova",
         "checksum": "02de0576dd3a61ab59c03fd795fc86ac",
         "hypervisor":"VMware",
         "ostypeid":"74affaea-c658-11e4-ad38-a6d1374244b4"
@@ -1580,7 +1580,7 @@
           "QCOW2": {
         "templatename": "KVMUploadtemplate",
         "displaytext": "VMwareUploadtemplate",
-        "url": "http://10.147.28.7/templates/builtin/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2",
+        "url": "http://people.apache.org/~sanjeev/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2",
         "checksum": "da997b697feaa2f1f6e0d4785b0cece2",
         "hypervisor":"KVM",
         "ostypeid":"2e02e376-cdf3-11e4-beb3-8aa6272b57ef"
@@ -1655,7 +1655,7 @@
                     "name": "testISO",
                     "bootable": True,
                     "ispublic": False,
-                    "url": "http://10.147.40.145/ISO/CentOS-6.3-x86_64-bin-DVD1.iso",
+                    "url": "http://people.apache.org/~sanjeev/CentOS-6.3-x86_64-bin-DVD1.iso",
                     "ostype": 'CentOS 6.3 (64-bit)',
                     "mode": 'HTTP_DOWNLOAD'
         },
@@ -1670,7 +1670,7 @@
             "isextractable": True,
             "mode": "HTTP_DOWNLOAD",
             "templatefilter": "self",
-            "url": "http://10.147.28.7/templates/4.3.0.2/systemvm64template-2014-09-30-4.3-vmware.ova",
+            "url": "http://people.apache.org/~sanjeev/systemvm64template-2014-09-30-4.3-vmware.ova",
             "hypervisor": "vmware",
             "format": "OVA",
             "nicadapter": "vmxnet3",
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index 54922c8..fc49fd2 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -2936,7 +2936,7 @@
 
     @classmethod
     def create(cls, apiclient, publicipid, account=None, domainid=None,
-               projectid=None, networkid=None, vpcid=None, openfirewall=None):
+               projectid=None, networkid=None, vpcid=None, openfirewall=None, iprange=None, fordisplay=False):
         """Create VPN for Public IP address"""
         cmd = createRemoteAccessVpn.createRemoteAccessVpnCmd()
         cmd.publicipid = publicipid
@@ -2950,8 +2950,12 @@
             cmd.networkid = networkid
         if vpcid:
             cmd.vpcid = vpcid
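+        # new optional arguments: an explicit client IP range for the VPN and the fordisplay UI flag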
+        if iprange:
+            cmd.iprange = iprange
         if openfirewall:
             cmd.openfirewall = openfirewall
+
+        cmd.fordisplay = fordisplay
         return Vpn(apiclient.createRemoteAccessVpn(cmd).__dict__)
 
     @classmethod
@@ -2962,11 +2966,13 @@
         return (apiclient.createVpnGateway(cmd).__dict__)
 
     @classmethod
-    def createVpnConnection(cls, apiclient, s2scustomergatewayid, s2svpngatewayid):
+    def createVpnConnection(cls, apiclient, s2scustomergatewayid, s2svpngatewayid, passive=False):
         """Create VPN Connection """
         cmd = createVpnConnection.createVpnConnectionCmd()
         cmd.s2scustomergatewayid = s2scustomergatewayid
         cmd.s2svpngatewayid = s2svpngatewayid
+        if passive:
+            cmd.passive = passive
         return (apiclient.createVpnConnection(cmd).__dict__)
 
     @classmethod
diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml
index 5d7396e..bb905b4 100644
--- a/tools/marvin/pom.xml
+++ b/tools/marvin/pom.xml
@@ -19,6 +19,16 @@
     <version>4.6.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
+
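+  <!-- presumably ensures cloud-apidoc's generated command listing exists before Marvin builds -->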
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-apidoc</artifactId>
+      <version>${project.version}</version>
+      <type>pom</type>
+    </dependency>
+  </dependencies>
+
   <build>
     <defaultGoal>install</defaultGoal>
     <plugins>