Improve flake8 compliance
patch by Tomek Łasica; reviewed by Mick Semb Wever
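
Beyond the whitespace and comment-spacing cleanups that flake8 flags,
two substantive changes land here. check_expected() replaces a mutable
default argument (node=[i for i in range(0,1000)] was built once at
import time and shared across calls; a classic pitfall even though the
list is never mutated in this test) with the None-sentinel idiom. And
the failure message in check_replication() passed two arguments to a
format string with a single placeholder, so a failing assert raised
TypeError instead of reporting the mismatch. A minimal sketch of both
pitfalls (function names below are illustrative, not from the test):

    # Defaults are evaluated once, at definition time, so a mutable
    # default is shared by every call that does not override it.
    def record_bad(seen=[]):
        seen.append(1)
        return len(seen)

    # The None-sentinel idiom applied to check_expected(): a fresh
    # list is created on every call instead.
    def record_good(seen=None):
        if seen is None:
            seen = []
        seen.append(1)
        return len(seen)

    assert record_bad() == 1
    assert record_bad() == 2     # state leaked from the first call
    assert record_good() == 1
    assert record_good() == 1    # calls stay independent

    # The assert-message bug: one %05d placeholder, two arguments.
    try:
        "Wrong replication for %05d should be exactly" % (1, 2)
    except TypeError:
        pass  # raised instead of producing the intended message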
diff --git a/transient_replication_ring_test.py b/transient_replication_ring_test.py
index e9ccfa1..32a595a 100644
--- a/transient_replication_ring_test.py
+++ b/transient_replication_ring_test.py
@@ -21,6 +21,7 @@
NODELOCAL = 11
+
def jmx_start(to_start, **kwargs):
kwargs['jvm_args'] = kwargs.get('jvm_args', []) + ['-XX:-PerfDisableSharedMem']
to_start.start(**kwargs)
@@ -34,10 +35,12 @@
for node in nodes:
node.nodetool('repair -pr')
+
def cleanup_nodes(nodes):
for node in nodes:
node.nodetool('cleanup')
+
def patch_start(startable):
old_start = startable.start
@@ -67,8 +70,10 @@
def point_select_statement(self):
return SimpleStatement(self.point_select(), consistency_level=NODELOCAL)
- def check_expected(self, sessions, expected, node=[i for i in range(0,1000)], cleanup=False):
+ def check_expected(self, sessions, expected, node=None, cleanup=False):
"""Check that each node has the expected values present"""
+ if node is None:
+ node = list(range(1000))
for idx, session, expect, node in zip(range(0, 1000), sessions, expected, node):
print("Checking idx " + str(idx))
print(str([row for row in session.execute(self.select_statement())]))
@@ -84,10 +89,10 @@
for i in range(0, 40):
count = 0
for session in sessions:
- for row in session.execute(self.point_select_statement(), ["%05d" % i]):
+ for _ in session.execute(self.point_select_statement(), ["%05d" % i]):
count += 1
if exactly:
- assert count == exactly, "Wrong replication for %05d should be exactly" % (i, exactly)
+ assert count == exactly, "Wrong replication for %05d should be exactly %d" % (i, exactly)
if gte:
assert count >= gte, "Count for %05d should be >= %d" % (i, gte)
if lte:
@@ -106,7 +111,7 @@
'num_tokens': 1,
'commitlog_sync_period_in_ms': 500,
'enable_transient_replication': True,
- 'partitioner' : 'org.apache.cassandra.dht.OrderPreservingPartitioner'})
+ 'partitioner': 'org.apache.cassandra.dht.OrderPreservingPartitioner'})
print("CLUSTER INSTALL DIR: ")
print(self.cluster.get_install_dir())
self.cluster.populate(3, tokens=self.tokens, debug=True, install_byteman=True)
@@ -131,17 +136,17 @@
print("CREATE KEYSPACE %s WITH REPLICATION={%s}" % (self.keyspace, replication_params))
session.execute(
"CREATE TABLE %s.%s (pk varchar, ck int, value int, PRIMARY KEY (pk, ck)) WITH speculative_retry = 'NEVER' AND additional_write_policy = 'NEVER' AND read_repair = 'NONE'" % (
- self.keyspace, self.table))
+ self.keyspace, self.table))
def quorum(self, session, stmt_str):
return session.execute(SimpleStatement(stmt_str, consistency_level=ConsistencyLevel.QUORUM))
def insert_row(self, pk, ck, value, session=None, node=None):
session = session or self.exclusive_cql_connection(node or self.node1)
- #token = BytesToken.from_key(pack('>i', pk)).value
- #assert token < BytesToken.from_string(self.tokens[0]).value or BytesToken.from_string(self.tokens[-1]).value < token # primary replica should be node1
- #TODO Is quorum really right? I mean maybe we want ALL with retries since we really don't want to the data
- #not at a replica unless it is intentional
+ # token = BytesToken.from_key(pack('>i', pk)).value
+ # assert token < BytesToken.from_string(self.tokens[0]).value or BytesToken.from_string(self.tokens[-1]).value < token # primary replica should be node1
+ # TODO Is quorum really right? Maybe we want ALL with retries, since we really don't want data
+ # to be missing from a replica unless that is intentional
self.quorum(session, "INSERT INTO %s.%s (pk, ck, value) VALUES ('%05d', %s, %s)" % (self.keyspace, self.table, pk, ck, value))
@flaky(max_runs=1)
@@ -161,13 +166,13 @@
gen_expected(range(12, 31, 2))]
self.check_expected(sessions, expected)
- #Make sure at least a little data is repaired, this shouldn't move data anywhere
+ # Make sure at least a little data is repaired, this shouldn't move data anywhere
repair_nodes(nodes)
self.check_expected(sessions, expected)
- #Ensure that there is at least some transient data around, because of this if it's missing after bootstrap
- #We know we failed to get it from the transient replica losing the range entirely
+ # Ensure that there is at least some transient data around; that way, if it's missing after bootstrap
+ # we know we failed to get it from the transient replica losing the range entirely
nodes[1].stop(wait_other_notice=True)
for i in range(1, 40, 2):
@@ -181,7 +186,7 @@
gen_expected(range(0, 21, 2), range(32, 40, 2)),
gen_expected(range(1, 11, 2), range(11, 31), range(31, 40, 2))]
- #Every node should have some of its fully replicated data and one and two should have some transient data
+ # Every node should have some of its fully replicated data and one and two should have some transient data
self.check_expected(sessions, expected)
node4 = new_node(self.cluster, bootstrap=True, token='00040')
@@ -192,21 +197,21 @@
expected.append(gen_expected(range(11, 20, 2), range(21, 40)))
sessions.append(self.exclusive_cql_connection(node4))
- #Because repair was never run and nodes had transient data it will have data for transient ranges (node1, 11-20)
+ # Because repair was never run and nodes had transient data, node4 will have data for transient ranges (node1, 11-20)
assert_all(sessions[3],
self.select(),
expected[3],
cl=NODELOCAL)
- #Node1 no longer transiently replicates 11-20, so cleanup will clean it up
- #Node1 also now transiently replicates 21-30 and half the values in that range were repaired
+ # Node1 no longer transiently replicates 11-20, so cleanup will clean it up
+ # Node1 also now transiently replicates 21-30 and half the values in that range were repaired
expected[0] = gen_expected(range(0, 11), range(21, 30, 2), range(31, 40))
- #Node2 still missing data since it was down during some insertions, it also lost some range (31-40)
+ # Node2 is still missing data since it was down during some insertions; it also lost some of its range (31-40)
expected[1] = gen_expected(range(0, 21, 2))
expected[2] = gen_expected(range(1, 11, 2), range(11, 31))
- #Cleanup should only impact if a node lost a range entirely or started to transiently replicate it and the data
- #was repaired
+ # Cleanup should only have an effect if a node lost a range entirely or started to transiently replicate it
+ # and the data was repaired
self.check_expected(sessions, expected, nodes, cleanup=True)
repair_nodes(nodes)
@@ -218,7 +223,7 @@
self.check_expected(sessions, expected, nodes, cleanup=True)
- #Every value should be replicated exactly 2 times
+ # Every value should be replicated exactly 2 times
self.check_replication(sessions, exactly=2)
@flaky(max_runs=1)
@@ -267,7 +272,6 @@
self.check_expected(sessions, expected_after_repair, nodes, cleanup=True)
self.check_replication(sessions, exactly=2)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_move_forwards_between_and_cleanup(self):
@@ -275,7 +279,7 @@
move_token = '00025'
expected_after_move = [gen_expected(range(0, 26), range(31, 40, 2)),
gen_expected(range(0, 21, 2), range(31, 40)),
- gen_expected(range(1, 11, 2), range(11, 21, 2), range(21,31)),
+ gen_expected(range(1, 11, 2), range(11, 21, 2), range(21, 31)),
gen_expected(range(21, 26, 2), range(26, 40))]
expected_after_repair = [gen_expected(range(0, 26)),
gen_expected(range(0, 21), range(31, 40)),
@@ -283,7 +287,6 @@
gen_expected(range(26, 40))]
self.move_test(move_token, expected_after_move, expected_after_repair)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_move_forwards_and_cleanup(self):
@@ -299,7 +302,6 @@
gen_expected(range(21, 40))]
self.move_test(move_token, expected_after_move, expected_after_repair)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_move_backwards_between_and_cleanup(self):
@@ -315,7 +317,6 @@
gen_expected(range(31, 40))]
self.move_test(move_token, expected_after_move, expected_after_repair)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_move_backwards_and_cleanup(self):
@@ -331,7 +332,6 @@
gen_expected(range(21, 40))]
self.move_test(move_token, expected_after_move, expected_after_repair)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_decommission(self):
@@ -367,7 +367,7 @@
self.check_expected(sessions, expected)
- #node1 has transient data we want to see streamed out on move
+ # node1 has transient data we want to see streamed out on decommission
nodes[3].nodetool('decommission')
nodes = nodes[:-1]
@@ -384,7 +384,7 @@
repair_nodes(nodes)
- #There should be no transient data anywhere
+ # There should be no transient data anywhere
expected = [gen_expected(range(0, 11), range(21, 40)),
gen_expected(range(0, 21), range(31, 40)),
gen_expected(range(11, 31))]
@@ -392,7 +392,6 @@
self.check_expected(sessions, expected, nodes, cleanup=True)
self.check_replication(sessions, exactly=2)
-
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_remove(self):
@@ -403,13 +402,12 @@
main_session = self.patient_cql_connection(self.node1)
nodes = [self.node1, self.node2, self.node3]
- #We want the node being removed to have no data on it so nodetool remove always gets all the necessary data
- #from survivors
+ # We want the node being removed to have no data on it
+ # so nodetool remove always gets all the necessary data from survivors
node4_id = node4.nodetool('info').stdout[25:61]
node4.stop(wait_other_notice=True)
for i in range(0, 40):
- print("Inserting " + str(i))
self.insert_row(i, i, i, main_session)
sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
@@ -423,45 +421,34 @@
nodes[0].nodetool('removenode ' + node4_id)
- #Give streaming time to occur, it's asynchronous from removenode completing at other ndoes
+ # Give streaming time to occur, it's asynchronous from removenode completing at other nodes
import time
time.sleep(15)
- # Everyone should have everything except
- expected = [gen_expected(range(0, 40)),
- gen_expected(range(0, 40)),
- gen_expected(range(0,40))]
+ self._everyone_should_have_everything(sessions)
- self.check_replication(sessions, exactly=3)
- self.check_expected(sessions, expected)
repair_nodes(nodes)
cleanup_nodes(nodes)
- self.check_replication(sessions, exactly=2)
-
- expected = [gen_expected(range(0,11), range(21,40)),
- gen_expected(range(0,21), range(31, 40)),
- gen_expected(range(11,31))]
- self.check_expected(sessions, expected)
+ self._nodes_have_proper_ranges_after_repair_and_cleanup(sessions)
@flaky(max_runs=1)
@pytest.mark.no_vnodes
def test_replace(self):
main_session = self.patient_cql_connection(self.node1)
- #We want the node being replaced to have no data on it so the replacement definitely fetches all the data
+ # We want the node being replaced to have no data on it so the replacement definitely fetches all the data
self.node2.stop(wait_other_notice=True)
for i in range(0, 40):
- print("Inserting " + str(i))
self.insert_row(i, i, i, main_session)
replacement_address = self.node2.address()
self.node2.stop(wait_other_notice=True)
self.cluster.remove(self.node2)
self.node2 = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
- thrift_interface=None, storage_interface=(replacement_address, 7000),
- jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
+ thrift_interface=None, storage_interface=(replacement_address, 7000),
+ jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
patch_start(self.node2)
nodes = [self.node1, self.node2, self.node3]
self.cluster.add(self.node2, False, data_center='datacenter1')
@@ -472,20 +459,21 @@
sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3]]
- # Everyone should have everything
- expected = [gen_expected(range(0, 40)),
- gen_expected(range(0, 40)),
- gen_expected(range(0,40))]
-
- self.check_replication(sessions, exactly=3)
- self.check_expected(sessions, expected)
+ self._everyone_should_have_everything(sessions)
repair_nodes(nodes)
cleanup_nodes(nodes)
- self.check_replication(sessions, exactly=2)
+ self._nodes_have_proper_ranges_after_repair_and_cleanup(sessions)
- expected = [gen_expected(range(0,11), range(21,40)),
- gen_expected(range(0,21), range(31, 40)),
- gen_expected(range(11,31))]
- self.check_expected(sessions, expected)
\ No newline at end of file
+ def _everyone_should_have_everything(self, sessions):
+ expected = [gen_expected(range(0, 40))] * 3
+ self.check_replication(sessions, exactly=3)
+ self.check_expected(sessions, expected)
+
+ def _nodes_have_proper_ranges_after_repair_and_cleanup(self, sessions):
+ expected = [gen_expected(range(0, 11), range(21, 40)),
+ gen_expected(range(0, 21), range(31, 40)),
+ gen_expected(range(11, 31))]
+ self.check_replication(sessions, exactly=2)
+ self.check_expected(sessions, expected)