OMID-226 Migrate from mockito-all to mockito-core and clean up test dependencies (#123)
* OMID-226 Migrate from mockito-all to mockito-core and clean up test dependencies
At some point OMID will upgrade to latest Hadoop 2.10 and HBase 2.4
releases and will then experience IncompatibleClassChangeErrors in
unit tests due to a conflict between mockito-all used here and
mockito-core used by Hadoop and HBase. Prepare for this by migrating
from mockito-all to mockito-core now.
Update matchers in some unit tests for minor differences in mocking
and matching behavior after the migration.
Clean up test dependency declarations in POMs while at it.
Co-authored-by: Andrew Purtell <apurtell@salesforce.com>
diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index b9ff7db..ec42573 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -107,7 +107,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
diff --git a/commit-table/pom.xml b/commit-table/pom.xml
index b65ff42..d272d01 100644
--- a/commit-table/pom.xml
+++ b/commit-table/pom.xml
@@ -43,7 +43,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
diff --git a/common/pom.xml b/common/pom.xml
index e70cf99..c070b57 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -78,7 +78,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
</dependency>
<!-- end testing -->
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 34ce5b1..e4e6391 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -132,7 +132,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
@@ -144,8 +143,7 @@
</dependency>
<dependency>
<groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <version>${mockito.version}</version>
+ <artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/hbase-commit-table/pom.xml b/hbase-commit-table/pom.xml
index f3cfb84..0fa2c42 100644
--- a/hbase-commit-table/pom.xml
+++ b/hbase-commit-table/pom.xml
@@ -97,7 +97,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
@@ -108,6 +107,11 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>
<scope>test</scope>
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 2792cc6..958d522 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -72,7 +72,18 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <!-- Some Hadoop versions' minicluster dependencies don't pull this in
+ so need to add explicitly even though Omid uses testng.-->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/hbase-coprocessor/pom.xml b/hbase-coprocessor/pom.xml
index 318032f..4947276 100644
--- a/hbase-coprocessor/pom.xml
+++ b/hbase-coprocessor/pom.xml
@@ -123,7 +123,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
@@ -134,16 +133,14 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
- <version>${hamcrest.version}</version>
- <type>jar</type>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <version>${mockito.version}</version>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-all</artifactId>
+ <type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/hbase-tools/pom.xml b/hbase-tools/pom.xml
index e2e977e..107797b 100644
--- a/hbase-tools/pom.xml
+++ b/hbase-tools/pom.xml
@@ -101,7 +101,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
@@ -112,6 +111,11 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>
<scope>test</scope>
diff --git a/pom.xml b/pom.xml
index d26f00b..6607ce0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -167,7 +167,7 @@
<protobuf.plugin.version>0.6.1</protobuf.plugin.version>
<os.plugin.version>1.6.2</os.plugin.version>
<junit.version>4.13.1</junit.version>
- <mockito.version>1.10.19</mockito.version>
+ <mockito.version>2.28.2</mockito.version>
<disruptor.version>3.2.0</disruptor.version>
<metrics.version>3.0.1</metrics.version>
<jcommander.version>1.82</jcommander.version>
@@ -1102,12 +1102,35 @@
</dependency>
<!-- Other test dependencies -->
+
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>${testng.version}</version>
+ <scope>test</scope>
+ </dependency>
+
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
<scope>test</scope>
</dependency>
+
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>${mockito.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-all</artifactId>
+ <version>${hamcrest.version}</version>
+ <scope>test</scope>
+ </dependency>
+
</dependencies>
</dependencyManagement>
diff --git a/statemachine/pom.xml b/statemachine/pom.xml
index 1383596..3001b38 100644
--- a/statemachine/pom.xml
+++ b/statemachine/pom.xml
@@ -68,7 +68,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
diff --git a/timestamp-storage/pom.xml b/timestamp-storage/pom.xml
index 471d173..9412349 100644
--- a/timestamp-storage/pom.xml
+++ b/timestamp-storage/pom.xml
@@ -118,7 +118,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
@@ -130,8 +129,7 @@
</dependency>
<dependency>
<groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <version>${mockito.version}</version>
+ <artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/transaction-client/pom.xml b/transaction-client/pom.xml
index 2f4b2ee..fbea5c6 100644
--- a/transaction-client/pom.xml
+++ b/transaction-client/pom.xml
@@ -133,7 +133,6 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<!-- end testing -->
diff --git a/tso-server/pom.xml b/tso-server/pom.xml
index 4445280..cfe87bd 100644
--- a/tso-server/pom.xml
+++ b/tso-server/pom.xml
@@ -215,13 +215,11 @@
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
- <version>${testng.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <version>${mockito.version}</version>
+ <artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<!-- Some Hadoop versions' minicluster dependencies don't pull this in
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestPersistenceProcessorHandler.java b/tso-server/src/test/java/org/apache/omid/tso/TestPersistenceProcessorHandler.java
index 02a6790..3f77b42 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestPersistenceProcessorHandler.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestPersistenceProcessorHandler.java
@@ -168,7 +168,7 @@
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertTrue(batch.isEmpty());
@@ -179,14 +179,14 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addTimestamp(FIRST_ST, null, mock(MonitoringContextImpl.class));
+ batch.addTimestamp(FIRST_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
persistenceHandler.onEvent(batchEvent);
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 1);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -198,14 +198,14 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
+ batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
persistenceHandler.onEvent(batchEvent);
verify(persistenceHandler, times(1)).flush(eq(1));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(batch);
- verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 1);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -218,14 +218,14 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addAbort(FIRST_ST, null, mock(MonitoringContextImpl.class));
+ batch.addAbort(FIRST_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
persistenceHandler.onEvent(batchEvent);
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(batch);
- verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 1);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -237,7 +237,7 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContextImpl.class));
+ batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -246,7 +246,7 @@
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(batch);
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 0);
@@ -257,8 +257,8 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
- batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContextImpl.class));
+ batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
+ batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -270,7 +270,7 @@
verify(persistenceHandler, times(1)).flush(eq(1));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 1);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -286,8 +286,8 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContextImpl.class));
- batch.addCommit(SECOND_ST, SECOND_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
+ batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContext.class));
+ batch.addCommit(SECOND_ST, SECOND_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -299,7 +299,7 @@
verify(persistenceHandler, times(1)).flush(eq(1));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 1);
assertEquals(batch.get(0).getStartTimestamp(), SECOND_ST);
@@ -312,8 +312,8 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContextImpl.class));
- batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContextImpl.class));
+ batch.addCommitRetry(FIRST_ST, null, mock(MonitoringContext.class));
+ batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -325,8 +325,8 @@
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(Channel.class), any(MonitoringContextImpl.class));
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(FIRST_ST), any(), any(MonitoringContext.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 0);
@@ -337,8 +337,8 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addAbort(FIRST_ST, null, mock(MonitoringContextImpl.class));
- batch.addAbort(SECOND_ST, null, mock(MonitoringContextImpl.class));
+ batch.addAbort(FIRST_ST, null, mock(MonitoringContext.class));
+ batch.addAbort(SECOND_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -350,7 +350,7 @@
verify(persistenceHandler, times(1)).flush(eq(0));
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, never()).disambiguateRetryRequestHeuristically(anyLong(), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 2);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -365,12 +365,12 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addTimestamp(FIRST_ST, null, mock(MonitoringContextImpl.class));
- batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContextImpl.class));
- batch.addCommit(THIRD_ST, THIRD_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
- batch.addAbort(FOURTH_ST, null, mock(MonitoringContextImpl.class));
- batch.addCommit(FIFTH_ST, FIFTH_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
- batch.addCommitRetry(SIXTH_ST, null, mock(MonitoringContextImpl.class));
+ batch.addTimestamp(FIRST_ST, null, mock(MonitoringContext.class));
+ batch.addCommitRetry(SECOND_ST, null, mock(MonitoringContext.class));
+ batch.addCommit(THIRD_ST, THIRD_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
+ batch.addAbort(FOURTH_ST, null, mock(MonitoringContext.class));
+ batch.addCommit(FIFTH_ST, FIFTH_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
+ batch.addCommitRetry(SIXTH_ST, null, mock(MonitoringContext.class));
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -382,7 +382,7 @@
verify(persistenceHandler, times(1)).flush(2); // 2 commits to flush
verify(persistenceHandler, times(1)).filterAndDissambiguateClientRetries(eq(batch));
- verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(retryProcessor, times(1)).disambiguateRetryRequestHeuristically(eq(SECOND_ST), any(), any(MonitoringContext.class));
verify(replyProcessor, times(1)).manageResponsesBatch(eq(BATCH_SEQUENCE), eq(batch));
assertEquals(batch.getNumEvents(), 4);
assertEquals(batch.get(0).getStartTimestamp(), FIRST_ST);
@@ -409,7 +409,7 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
+ batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -451,7 +451,7 @@
// Prepare test batch
Batch batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
+ batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
PersistBatchEvent batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -463,7 +463,7 @@
}
verify(persistenceHandler, times(1)).flush(eq(1));
verify(mockWriter, never()).flush();
- verify(panicker, times(1)).panic(eq("Replica localhost:1234 lost mastership whilst flushing data. Committing suicide"), any(IOException.class));
+ verify(panicker, times(1)).panic(eq("Replica localhost:1234 lost mastership whilst flushing data. Committing suicide"), any(Throwable.class));
verify(persistenceHandler, never()).filterAndDissambiguateClientRetries(any(Batch.class));
verify(replyProcessor, never()).manageResponsesBatch(anyLong(), any(Batch.class));
@@ -486,7 +486,7 @@
// Prepare test batch
batch = new Batch(BATCH_ID, BATCH_SIZE);
- batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContextImpl.class), Optional.<Long>absent());
+ batch.addCommit(FIRST_ST, FIRST_CT, null, mock(MonitoringContext.class), Optional.<Long>absent());
batchEvent = new PersistBatchEvent();
PersistBatchEvent.makePersistBatch(batchEvent, BATCH_SEQUENCE, batch);
@@ -498,7 +498,7 @@
}
verify(persistenceHandler, times(1)).flush(eq(1));
verify(mockWriter, times(1)).flush();
- verify(panicker, times(1)).panic(eq("Replica localhost:1234 lost mastership whilst flushing data. Committing suicide"), any(IOException.class));
+ verify(panicker, times(1)).panic(eq("Replica localhost:1234 lost mastership whilst flushing data. Committing suicide"), any(Throwable.class));
verify(persistenceHandler, never()).filterAndDissambiguateClientRetries(any(Batch.class));
verify(replyProcessor, never()).manageResponsesBatch(anyLong(), any(Batch.class));
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java b/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
index e8c621f..f5836dd 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestRequestProcessor.java
@@ -99,13 +99,13 @@
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> firstTScapture = ArgumentCaptor.forClass(Long.class);
verify(persist, timeout(100).times(1)).addTimestampToBatch(
- firstTScapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ firstTScapture.capture(), any(), any(MonitoringContext.class));
long firstTS = firstTScapture.getValue();
// verify that timestamps increase monotonically
for (int i = 0; i < 100; i++) {
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
- verify(persist, timeout(100).times(1)).addTimestampToBatch(eq(firstTS), any(Channel.class), any(MonitoringContext.class));
+ verify(persist, timeout(100).times(1)).addTimestampToBatch(eq(firstTS), any(), any(MonitoringContext.class));
firstTS += CommitTable.MAX_CHECKPOINTS_PER_TXN;
}
@@ -117,36 +117,36 @@
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> TScapture = ArgumentCaptor.forClass(Long.class);
verify(persist, timeout(100).times(1)).addTimestampToBatch(
- TScapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ TScapture.capture(), any(), any(MonitoringContext.class));
long firstTS = TScapture.getValue();
List<Long> writeSet = Lists.newArrayList(1L, 20L, 203L);
requestProc.commitRequest(firstTS - CommitTable.MAX_CHECKPOINTS_PER_TXN, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContextImpl(metrics));
- verify(persist, timeout(100).times(1)).addAbortToBatch(eq(firstTS - CommitTable.MAX_CHECKPOINTS_PER_TXN), any(Channel.class), any(MonitoringContext.class));
+ verify(persist, timeout(100).times(1)).addAbortToBatch(eq(firstTS - CommitTable.MAX_CHECKPOINTS_PER_TXN), any(), any(MonitoringContext.class));
requestProc.commitRequest(firstTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> commitTScapture = ArgumentCaptor.forClass(Long.class);
- verify(persist, timeout(100).times(1)).addCommitToBatch(eq(firstTS), commitTScapture.capture(), any(Channel.class), any(MonitoringContext.class), any(Optional.class));
+ verify(persist, timeout(100).times(1)).addCommitToBatch(eq(firstTS), commitTScapture.capture(), any(), any(MonitoringContext.class), any(Optional.class));
assertTrue(commitTScapture.getValue() > firstTS, "Commit TS must be greater than start TS");
// test conflict
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
TScapture = ArgumentCaptor.forClass(Long.class);
verify(persist, timeout(100).times(2)).addTimestampToBatch(
- TScapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ TScapture.capture(), any(), any(MonitoringContext.class));
long secondTS = TScapture.getValue();
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
TScapture = ArgumentCaptor.forClass(Long.class);
verify(persist, timeout(100).times(3)).addTimestampToBatch(
- TScapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ TScapture.capture(), any(), any(MonitoringContext.class));
long thirdTS = TScapture.getValue();
requestProc.commitRequest(thirdTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContextImpl(metrics));
- verify(persist, timeout(100).times(1)).addCommitToBatch(eq(thirdTS), anyLong(), any(Channel.class), any(MonitoringContextImpl.class), any(Optional.class));
+ verify(persist, timeout(100).times(1)).addCommitToBatch(eq(thirdTS), anyLong(), any(), any(MonitoringContext.class), any(Optional.class));
requestProc.commitRequest(secondTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContextImpl(metrics));
- verify(persist, timeout(100).times(1)).addAbortToBatch(eq(secondTS), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(persist, timeout(100).times(1)).addAbortToBatch(eq(secondTS), any(), any(MonitoringContext.class));
}
@@ -156,7 +156,7 @@
requestProc.fenceRequest(666L, null, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> firstTScapture = ArgumentCaptor.forClass(Long.class);
verify(replyProcessor, timeout(100).times(1)).sendFenceResponse(eq(666L),
- firstTScapture.capture(), any(Channel.class), any(MonitoringContext.class));
+ firstTScapture.capture(), any(), any(MonitoringContext.class));
}
@@ -169,8 +169,8 @@
requestProc.timestampRequest(null, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> capturedTS = ArgumentCaptor.forClass(Long.class);
verify(persist, timeout(100).times(1)).addTimestampToBatch(capturedTS.capture(),
- any(Channel.class),
- any(MonitoringContextImpl.class));
+ any(),
+ any(MonitoringContext.class));
long startTS = capturedTS.getValue();
// ... simulate the reset of the RequestProcessor state (e.g. due to
@@ -179,7 +179,7 @@
// ...check that the transaction is aborted when trying to commit
requestProc.commitRequest(startTS, writeSet, new ArrayList<Long>(0), false, null, new MonitoringContextImpl(metrics));
- verify(persist, timeout(100).times(1)).addAbortToBatch(eq(startTS), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(persist, timeout(100).times(1)).addAbortToBatch(eq(startTS), any(), any(MonitoringContext.class));
}
@@ -201,9 +201,9 @@
// Check that first time its called is on init
verify(lowWatermarkWriter, timeout(100).times(1)).persistLowWatermark(eq(0L));
// Then, check it is called when cache is full and the first element is evicted (should be a AbstractTransactionManager.NUM_OF_CHECKPOINTS)
- verify(persist, timeout(100).times(1)).addCommitToBatch(eq(ANY_START_TS), anyLong(), any(Channel.class), any(MonitoringContextImpl.class), eq(Optional.of(FIRST_COMMIT_TS_EVICTED)));
+ verify(persist, timeout(100).times(1)).addCommitToBatch(eq(ANY_START_TS), anyLong(), any(), any(MonitoringContext.class), eq(Optional.of(FIRST_COMMIT_TS_EVICTED)));
// Finally it should never be called with the next element
- verify(persist, timeout(100).times(0)).addCommitToBatch(eq(ANY_START_TS), anyLong(), any(Channel.class), any(MonitoringContextImpl.class), eq(Optional.of(NEXT_COMMIT_TS_THAT_SHOULD_BE_EVICTED)));
+ verify(persist, timeout(100).times(0)).addCommitToBatch(eq(ANY_START_TS), anyLong(), any(), any(MonitoringContext.class), eq(Optional.of(NEXT_COMMIT_TS_THAT_SHOULD_BE_EVICTED)));
}
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestRetryProcessor.java b/tso-server/src/test/java/org/apache/omid/tso/TestRetryProcessor.java
index 370b27b..a5fdba8 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestRetryProcessor.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestRetryProcessor.java
@@ -79,7 +79,7 @@
retryProc.disambiguateRetryRequestHeuristically(NON_EXISTING_ST_TX, channel, monCtx);
ArgumentCaptor<Long> firstTSCapture = ArgumentCaptor.forClass(Long.class);
- verify(replyProc, timeout(100).times(1)).sendAbortResponse(firstTSCapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(replyProc, timeout(100).times(1)).sendAbortResponse(firstTSCapture.capture(), any(), any(MonitoringContext.class));
long startTS = firstTSCapture.getValue();
assertEquals(startTS, NON_EXISTING_ST_TX, "Captured timestamp should be the same as NON_EXISTING_ST_TX");
}
@@ -99,7 +99,7 @@
verify(replyProc, timeout(100).times(1)).sendCommitResponse(firstTSCapture.capture(),
secondTSCapture.capture(),
- any(Channel.class), any(MonitoringContextImpl.class), any(Optional.class));
+ any(), any(MonitoringContext.class), any(Optional.class));
long startTS = firstTSCapture.getValue();
long commitTS = secondTSCapture.getValue();
@@ -128,7 +128,7 @@
// Test we return an Abort to a retry request when the transaction id IS in the commit table BUT invalidated
retryProc.disambiguateRetryRequestHeuristically(ST_TX_1, channel, new MonitoringContextImpl(metrics));
ArgumentCaptor<Long> startTSCapture = ArgumentCaptor.forClass(Long.class);
- verify(replyProc, timeout(100).times(1)).sendAbortResponse(startTSCapture.capture(), any(Channel.class), any(MonitoringContextImpl.class));
+ verify(replyProc, timeout(100).times(1)).sendAbortResponse(startTSCapture.capture(), any(), any(MonitoringContext.class));
long startTS = startTSCapture.getValue();
Assert.assertEquals(startTS, ST_TX_1, "Captured timestamp should be the same as NON_EXISTING_ST_TX");
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java b/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
index cae9c85..269594f 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestTSOChannelHandlerNetty.java
@@ -281,9 +281,9 @@
tsBuilder.setTimestampRequest(tsRequestBuilder.build());
// Write into the channel
channel.writeAndFlush(tsBuilder.build()).await();
- verify(requestProcessor, timeout(100).times(1)).timestampRequest(any(Channel.class), any(MonitoringContextImpl.class));
+ verify(requestProcessor, timeout(100).times(1)).timestampRequest(any(), any(MonitoringContext.class));
verify(requestProcessor, timeout(100).times(0))
- .commitRequest(anyLong(), anyCollectionOf(Long.class), anyCollectionOf(Long.class), anyBoolean(), any(Channel.class), any(MonitoringContextImpl.class));
+ .commitRequest(anyLong(), anyCollectionOf(Long.class), anyCollectionOf(Long.class), anyBoolean(), any(), any(MonitoringContext.class));
}
private void testWritingCommitRequest(Channel channel) throws InterruptedException {
@@ -298,9 +298,9 @@
assertTrue(r.hasCommitRequest());
// Write into the channel
channel.writeAndFlush(commitBuilder.build()).await();
- verify(requestProcessor, timeout(100).times(0)).timestampRequest(any(Channel.class), any(MonitoringContextImpl.class));
+ verify(requestProcessor, timeout(100).times(0)).timestampRequest(any(), any(MonitoringContext.class));
verify(requestProcessor, timeout(100).times(1))
- .commitRequest(eq(666L), anyCollectionOf(Long.class), anyCollectionOf(Long.class), eq(false), any(Channel.class), any(MonitoringContextImpl.class));
+ .commitRequest(eq(666L), anyCollectionOf(Long.class), anyCollectionOf(Long.class), eq(false), any(), any(MonitoringContext.class));
}
private void testWritingFenceRequest(Channel channel) throws InterruptedException {
@@ -314,9 +314,9 @@
assertTrue(r.hasFenceRequest());
// Write into the channel
channel.writeAndFlush(fenceBuilder.build()).await();
- verify(requestProcessor, timeout(100).times(0)).timestampRequest(any(Channel.class), any(MonitoringContextImpl.class));
+ verify(requestProcessor, timeout(100).times(0)).timestampRequest(any(), any(MonitoringContext.class));
verify(requestProcessor, timeout(100).times(1))
- .fenceRequest(eq(666L), any(Channel.class), any(MonitoringContextImpl.class));
+ .fenceRequest(eq(666L), any(), any(MonitoringContext.class));
}
// ----------------------------------------------------------------------------------------------------------------
diff --git a/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java b/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
index 182c611..dd4e2be 100644
--- a/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
+++ b/tso-server/src/test/java/org/apache/omid/tso/TestWorldTimeOracle.java
@@ -84,9 +84,6 @@
@Test(timeOut = 10_000)
public void testTimestampOraclePanicsWhenTheStorageHasProblems() throws Exception {
- // Intialize component under test
- worldClockOracle.initialize();
-
// Cause an exception when updating the max timestamp
final CountDownLatch updateMaxTimestampMethodCalled = new CountDownLatch(1);
doAnswer(new Answer() {
@@ -97,17 +94,8 @@
}
}).when(timestampStorage).updateMaxTimestamp(anyLong(), anyLong());
- // Make the previous exception to be thrown
- Thread allocThread = new Thread("AllocThread") {
- @Override
- public void run() {
- while (true) {
- worldClockOracle.next();
- }
- }
- };
- allocThread.start();
-
+ // Initialize component under test
+ worldClockOracle.initialize();
updateMaxTimestampMethodCalled.await();
// Verify that it has blown up