Explicit node start timeouts

Some tests require a longer start timeout than the default 90s:
* bootstrap with reset state
* node replacement
* cdc tests (due to checks for other seeds' connectivity)

Before: the default timeout was used, nominally 90s but in practice 600s (due to a bug in ccm)
After: use an explicit timeout per test case, 120s or 180s, as sketched below
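
For illustration, the change boils down to passing an explicit number of seconds
to ccm instead of True (node and mark names below are placeholders):

    # wait up to 120s for the native protocol to come up during start
    node.start(wait_for_binary_proto=120)
    # wait up to 180s for the node to become queryable after bootstrap
    node.wait_for_binary_interface(from_mark=mark, timeout=180)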

Patch by Tomek Lasica; reviewed by Mick Semb Wever for CASSANDRA-16395
diff --git a/bootstrap_test.py b/bootstrap_test.py
index 599bf66..1184b8c 100644
--- a/bootstrap_test.py
+++ b/bootstrap_test.py
@@ -415,8 +415,8 @@
         node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
         # check if we reset bootstrap state
         node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
-        # wait for node3 ready to query
-        node3.wait_for_binary_interface(from_mark=mark)
+        # wait for node3 ready to query, 180s as the node needs to bootstrap
+        node3.wait_for_binary_interface(from_mark=mark, timeout=180)
 
         # check if 2nd bootstrap succeeded
         assert_bootstrap_state(self, node3, 'COMPLETED')
diff --git a/cdc_test.py b/cdc_test.py
index df21f7d..87d337b 100644
--- a/cdc_test.py
+++ b/cdc_test.py
@@ -547,7 +547,7 @@
         logger.debug('adding node')
         self.cluster.add(loading_node, is_seed=True)
         logger.debug('starting new node')
-        loading_node.start(wait_for_binary_proto=True)
+        loading_node.start(wait_for_binary_proto=120)
         logger.debug('recreating ks and table')
         loading_session = self.patient_exclusive_cql_connection(loading_node)
         create_ks(loading_session, ks_name, rf=1)
@@ -615,7 +615,7 @@
             os.path.join(generation_node.get_path(), 'cdc_raw'),
             os.path.join(loading_node.get_path(), 'commitlogs')
         )
-        loading_node.start(wait_for_binary_proto=True)
+        loading_node.start(wait_for_binary_proto=120)
         logger.debug('node successfully started; waiting on log replay')
         loading_node.grep_log('Log replay complete')
         logger.debug('log replay complete')
diff --git a/disk_balance_test.py b/disk_balance_test.py
index bfd3d8e..1c1a759 100644
--- a/disk_balance_test.py
+++ b/disk_balance_test.py
@@ -97,7 +97,7 @@
                      binary_interface=(node5_address, 9042))
         self.cluster.add(node5, False)
         node5.start(jvm_args=["-Dcassandra.replace_address_first_boot={}".format(node2.address())],
-                    wait_for_binary_proto=True,
+                    wait_for_binary_proto=180,
                     wait_other_notice=True)
 
         logger.debug("Checking replacement node is balanced")
diff --git a/replace_address_test.py b/replace_address_test.py
index 2fa76f9..0645571 100644
--- a/replace_address_test.py
+++ b/replace_address_test.py
@@ -484,13 +484,13 @@
         if mode == 'reset_resume_state':
             mark = self.replacement_node.mark_log()
             logger.debug("Restarting replacement node with -Dcassandra.reset_bootstrap_progress=true")
-            # restart replacement node with resetting bootstrap state
+            # restart replacement node, resetting bootstrap state (with 180s timeout)
             self.replacement_node.stop()
             self.replacement_node.start(jvm_args=[
                                         "-Dcassandra.replace_address_first_boot={}".format(self.replaced_node.address()),
                                         "-Dcassandra.reset_bootstrap_progress=true"
                                         ],
-                                        wait_for_binary_proto=True)
+                                        wait_for_binary_proto=180)
             # check if we reset bootstrap state
             self.replacement_node.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
         elif mode == 'resume':