Comprehensive event (#15858)
This change: (1) adds a `setIsPipeEnableMemoryCheck` option to the integration-test `CommonConfig` hierarchy (Mpp, shared, and remote implementations) and disables the pipe memory check in the pipe/subscription/tool ITs; (2) adds a `getTsFile()` accessor to the public `TsFileInsertionEvent` API; (3) introduces the `PIPE_PUSH_META_TIMEOUT(1813)` status code and reports it with a dedicated message in `parsePushPipeMetaExceptionForPipe`; (4) adds `runPipeTasks` to `PipeConfigNodeTaskAgent`; (5) replaces `loadTsFileTabletConversionThreadCount` with new `pipeTaskThreadCount` and `schemaThreadCount` settings in `IoTDBConfig`/`IoTDBDescriptor`. Note for reviewers: the property key written in `MppCommonConfig` is `pipe_enable_memory_checked` (trailing "ed") — confirm this matches the server-side config key; also, in `IoTDBDescriptor` the `.trim()` for `pipe_task_thread_count` is applied to the default value rather than to the looked-up property, unlike the `ext_pipe_dir` pattern above it.
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java
index fe4190d..a7d8918 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java
@@ -430,6 +430,12 @@
}
@Override
+ public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) {
+ setProperty("pipe_enable_memory_checked", String.valueOf(isPipeEnableMemoryCheck));
+ return this;
+ }
+
+ @Override
public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) {
setProperty("pipe_air_gap_receiver_enabled", String.valueOf(isPipeAirGapReceiverEnabled));
return this;
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java
index c2ade6e..e4a4ace 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java
@@ -439,6 +439,13 @@
}
@Override
+ public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) {
+ dnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck);
+ cnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck);
+ return this;
+ }
+
+ @Override
public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) {
dnConfig.setPipeAirGapReceiverEnabled(isPipeAirGapReceiverEnabled);
cnConfig.setPipeAirGapReceiverEnabled(isPipeAirGapReceiverEnabled);
diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java
index 581c5f4..23a8dad 100644
--- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java
+++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java
@@ -309,6 +309,11 @@
}
@Override
+ public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) {
+ return this;
+ }
+
+ @Override
public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) {
return this;
}
diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java
index 16d5095..178f182 100644
--- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java
+++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java
@@ -138,6 +138,8 @@
CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode);
+ CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck);
+
CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled);
CommonConfig setDriverTaskExecutionTimeSliceInMs(long driverTaskExecutionTimeSliceInMs);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
index 1817efd..59478fc 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java
@@ -48,13 +48,15 @@
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
index 65eb245..9da5591 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java
@@ -59,14 +59,16 @@
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java
index d2ca8ce..c8d5bc0 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java
@@ -74,7 +74,8 @@
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
@@ -84,7 +85,8 @@
.setSchemaReplicationFactor(3)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS);
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java
index 9721315..2114ed3 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java
@@ -66,14 +66,17 @@
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
+
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setPipeAirGapReceiverEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java
index 7fbb0f3..9a8ec2f 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java
@@ -459,7 +459,6 @@
testLoadTsFileWithoutVerify("async");
}
- @Test
private void testLoadTsFileWithoutVerify(final String loadTsFileStrategy) throws Exception {
final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java
index 0b4636c..4b33de0 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java
@@ -71,13 +71,15 @@
// Disable sender compaction for tsfile determination in loose range test
.setEnableSeqSpaceCompaction(false)
.setEnableUnseqSpaceCompaction(false)
- .setEnableCrossSpaceCompaction(false);
+ .setEnableCrossSpaceCompaction(false)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java
index addf031..771d50c 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java
@@ -65,13 +65,15 @@
// of the tested idempotent sql.
.setDefaultSchemaRegionGroupNumPerDatabase(1)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java
index 125fd69..13a63a5 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java
@@ -59,13 +59,15 @@
.setAutoCreateSchemaEnabled(true)
.setTimestampPrecision("ms")
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java
index c5d41f0..1f64676 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java
@@ -74,7 +74,8 @@
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setDataRegionConsensusProtocolClass(dataRegionConsensus)
.setSchemaReplicationFactor(schemaRegionReplicationFactor)
- .setDataReplicationFactor(dataRegionReplicationFactor);
+ .setDataReplicationFactor(dataRegionReplicationFactor)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
@@ -83,7 +84,8 @@
.setSchemaRegionConsensusProtocolClass(schemaRegionConsensus)
.setDataRegionConsensusProtocolClass(dataRegionConsensus)
.setSchemaReplicationFactor(schemaRegionReplicationFactor)
- .setDataReplicationFactor(dataRegionReplicationFactor);
+ .setDataReplicationFactor(dataRegionReplicationFactor)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java
index 563377c..c71e212 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java
@@ -61,13 +61,15 @@
// Disable sender compaction to test mods
.setEnableSeqSpaceCompaction(false)
.setEnableUnseqSpaceCompaction(false)
- .setEnableCrossSpaceCompaction(false);
+ .setEnableCrossSpaceCompaction(false)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(true)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java
index 3e9f511..b7091a1 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java
@@ -48,13 +48,15 @@
.getCommonConfig()
.setAutoCreateSchemaEnabled(false)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
receiverEnv
.getConfig()
.getCommonConfig()
.setAutoCreateSchemaEnabled(false)
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
- .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS);
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setIsPipeEnableMemoryCheck(false);
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java
index 4c51357..9a48442 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java
@@ -36,6 +36,7 @@
env.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true);
// 10 min, assert that the operations will not time out
env.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+ env.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
env.initClusterEnvironment();
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java
index de17dec..8156139 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java
@@ -82,7 +82,8 @@
.setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
.setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
.setSchemaReplicationFactor(3)
- .setDataReplicationFactor(2);
+ .setDataReplicationFactor(2)
+ .setIsPipeEnableMemoryCheck(false);
EnvFactory.getEnv().initClusterEnvironment(3, 3);
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java
index 594f9ef..5e61607 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java
@@ -58,6 +58,9 @@
// 10 min, assert that the operations will not time out
senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+
+ senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
+ receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
}
@Override
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java
index 6f8cba4..5ad46b1 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java
@@ -117,8 +117,11 @@
protected void setUpConfig() {
super.setUpConfig();
+ senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
+
// Enable air gap receiver
receiverEnv.getConfig().getCommonConfig().setPipeAirGapReceiverEnabled(true);
+ receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
}
@Override
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java
index 4327b7a..1939a98 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java
@@ -68,7 +68,9 @@
// Set timestamp precision to nanosecond
senderEnv.getConfig().getCommonConfig().setTimestampPrecision("ns");
+ senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
receiverEnv.getConfig().getCommonConfig().setTimestampPrecision("ns");
+ receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
}
@Test
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java
index b046f09..3797183 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java
@@ -91,6 +91,9 @@
.setPipeHeartbeatIntervalSecondsForCollectingPipeMeta(30);
senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerInitialSyncDelayMinutes(1);
senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerSyncIntervalMinutes(1);
+ senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
+
+ receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
}
@Test
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java
index 3f41505..7b56402 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java
@@ -33,7 +33,12 @@
super.setUp();
// enable subscription
- EnvFactory.getEnv().getConfig().getCommonConfig().setSubscriptionEnabled(true);
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setSubscriptionEnabled(true)
+ .setIsPipeEnableMemoryCheck(false);
+
EnvFactory.getEnv().initClusterEnvironment();
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java
index 3b49eb8..7c2a9698 100644
--- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java
@@ -69,6 +69,10 @@
sender.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
receiver1.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
receiver2.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000);
+
+ sender.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
+ receiver1.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
+ receiver2.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false);
}
@Override
diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java
index 6ad0c84..115dafb 100644
--- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java
@@ -53,7 +53,11 @@
@BeforeClass
public static void setUp() throws Exception {
// enable subscription
- EnvFactory.getEnv().getConfig().getCommonConfig().setSubscriptionEnabled(true);
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setSubscriptionEnabled(true)
+ .setIsPipeEnableMemoryCheck(false);
EnvFactory.getEnv().initClusterEnvironment();
ip = EnvFactory.getEnv().getIP();
diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java
index 3d04186..4c7fffc 100644
--- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java
+++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java
@@ -21,6 +21,8 @@
import org.apache.iotdb.pipe.api.event.Event;
+import java.io.File;
+
/**
* {@link TsFileInsertionEvent} is used to define the event of writing TsFile. Event data stores in
* disks, which is compressed and encoded, and requires IO cost for computational processing.
@@ -34,4 +36,12 @@
* @return {@code Iterable<TabletInsertionEvent>} the list of {@link TabletInsertionEvent}
*/
Iterable<TabletInsertionEvent> toTabletInsertionEvents();
+
+ /**
+ * Get the file that stores the data of this {@link TsFileInsertionEvent}. The file is compressed
+ * and encoded, and requires IO cost for computational processing.
+ *
+ * @return the file that stores the data of this {@link TsFileInsertionEvent}
+ */
+ File getTsFile();
}
diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
index 81f136f..34f9872 100644
--- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
+++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java
@@ -259,6 +259,7 @@
PIPE_RECEIVER_USER_CONFLICT_EXCEPTION(1810),
PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED(1811),
PIPE_TRANSFER_SLICE_OUT_OF_ORDER(1812),
+ PIPE_PUSH_META_TIMEOUT(1813),
// Subscription
SUBSCRIPTION_VERSION_ERROR(1900),
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
index 0e90174..bf14508 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java
@@ -46,11 +46,13 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
import java.util.stream.Collectors;
public class PipeConfigNodeTaskAgent extends PipeTaskAgent {
@@ -250,4 +252,10 @@
resp.setPipeRemainingEventCountList(pipeRemainingEventCountList);
resp.setPipeRemainingTimeList(pipeRemainingTimeList);
}
+
+ @Override
+ public void runPipeTasks(
+ final Collection<PipeTask> pipeTasks, final Consumer<PipeTask> runSingle) {
+ pipeTasks.forEach(runSingle);
+ }
}
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
index 3ab7527..eb4ffc4 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java
@@ -422,12 +422,20 @@
* @return Error messages for the given pipe after pushing pipe meta
*/
public static String parsePushPipeMetaExceptionForPipe(
- String pipeName, Map<Integer, TPushPipeMetaResp> respMap) {
+ final String pipeName, final Map<Integer, TPushPipeMetaResp> respMap) {
final StringBuilder exceptionMessageBuilder = new StringBuilder();
- for (Map.Entry<Integer, TPushPipeMetaResp> respEntry : respMap.entrySet()) {
- int dataNodeId = respEntry.getKey();
- TPushPipeMetaResp resp = respEntry.getValue();
+ for (final Map.Entry<Integer, TPushPipeMetaResp> respEntry : respMap.entrySet()) {
+ final int dataNodeId = respEntry.getKey();
+ final TPushPipeMetaResp resp = respEntry.getValue();
+
+ if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode()) {
+ exceptionMessageBuilder.append(
+ String.format(
+ "DataNodeId: %s, Message: Timeout to wait for lock while processing pushPipeMeta on dataNodes.",
+ dataNodeId));
+ continue;
+ }
if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_ERROR.getStatusCode()) {
if (!resp.isSetExceptionMessages()) {
@@ -438,7 +446,7 @@
continue;
}
- AtomicBoolean hasException = new AtomicBoolean(false);
+ final AtomicBoolean hasException = new AtomicBoolean(false);
resp.getExceptionMessages()
.forEach(
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java
index c2c04f9..8854ee6 100644
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java
@@ -44,7 +44,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getTimestampPrecision(), deserializeReq.getTimestampPrecision());
}
@@ -57,7 +56,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
}
@Test
@@ -72,7 +70,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileName(), deserializeReq.getFileName());
Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset());
@@ -95,7 +92,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileNames(), deserializeReq.getFileNames());
Assert.assertEquals(req.getFileLengths(), deserializeReq.getFileLengths());
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 446a428..890af21 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -314,6 +314,8 @@
private String extPipeDir =
IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.EXT_PIPE_FOLDER_NAME;
+ private int pipeTaskThreadCount = 5;
+
/** External lib directory for MQTT, stores user-uploaded JAR files */
private String mqttDir =
IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.MQTT_FOLDER_NAME;
@@ -1046,6 +1048,8 @@
/** Policy of DataNodeSchemaCache eviction */
private String dataNodeSchemaCacheEvictionPolicy = "FIFO";
+ private int schemaThreadCount = 5;
+
private String readConsistencyLevel = "strong";
/** Maximum execution time of a DriverTask */
@@ -1147,8 +1151,6 @@
private long loadTsFileTabletConversionBatchMemorySizeInBytes = 4096 * 1024;
- private int loadTsFileTabletConversionThreadCount = 5;
-
private long loadChunkMetadataMemorySizeInBytes = 33554432; // 32MB
private long loadMemoryAllocateRetryIntervalMs = 1000L;
@@ -3345,6 +3347,14 @@
this.extPipeDir = extPipeDir;
}
+ public int getPipeTaskThreadCount() {
+ return pipeTaskThreadCount;
+ }
+
+ public void setPipeTaskThreadCount(int pipeTaskThreadCount) {
+ this.pipeTaskThreadCount = pipeTaskThreadCount;
+ }
+
public void setPartitionCacheSize(int partitionCacheSize) {
this.partitionCacheSize = partitionCacheSize;
}
@@ -3481,6 +3491,14 @@
this.dataNodeSchemaCacheEvictionPolicy = dataNodeSchemaCacheEvictionPolicy;
}
+ public int getSchemaThreadCount() {
+ return schemaThreadCount;
+ }
+
+ public void setSchemaThreadCount(int schemaThreadCount) {
+ this.schemaThreadCount = schemaThreadCount;
+ }
+
public String getReadConsistencyLevel() {
return readConsistencyLevel;
}
@@ -4026,14 +4044,6 @@
loadTsFileTabletConversionBatchMemorySizeInBytes;
}
- public int getLoadTsFileTabletConversionThreadCount() {
- return loadTsFileTabletConversionThreadCount;
- }
-
- public void setLoadTsFileTabletConversionThreadCount(int loadTsFileTabletConversionThreadCount) {
- this.loadTsFileTabletConversionThreadCount = loadTsFileTabletConversionThreadCount;
- }
-
public long getLoadChunkMetadataMemorySizeInBytes() {
return loadChunkMetadataMemorySizeInBytes;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 25f3095..7fbd9ce 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -955,6 +955,10 @@
}
conf.setExtPipeDir(properties.getProperty("ext_pipe_dir", conf.getExtPipeDir()).trim());
+ conf.setPipeTaskThreadCount(
+ Integer.parseInt(
+ properties.getProperty(
+ "pipe_task_thread_count", Integer.toString(conf.getPipeTaskThreadCount())).trim()));
// At the same time, set TSFileConfig
List<FSType> fsTypes = new ArrayList<>();
@@ -1091,6 +1095,11 @@
properties.getProperty(
"datanode_schema_cache_eviction_policy", conf.getDataNodeSchemaCacheEvictionPolicy()));
+ conf.setSchemaThreadCount(
+ Integer.parseInt(
+ properties.getProperty(
+ "schema_thread_count", Integer.toString(conf.getSchemaThreadCount()))));
+
loadIoTConsensusProps(properties);
loadPipeConsensusProps(properties);
@@ -2386,11 +2395,6 @@
properties.getProperty(
"load_tsfile_tablet_conversion_batch_memory_size_in_bytes",
String.valueOf(conf.getLoadTsFileTabletConversionBatchMemorySizeInBytes()))));
- conf.setLoadTsFileTabletConversionThreadCount(
- Integer.parseInt(
- properties.getProperty(
- "load_tsfile_tablet_conversion_thread_count",
- String.valueOf(conf.getLoadTsFileTabletConversionThreadCount()))));
conf.setLoadChunkMetadataMemorySizeInBytes(
Long.parseLong(
Optional.ofNullable(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
index 96e2e4e..089d4e2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java
@@ -36,7 +36,6 @@
import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager;
import org.apache.iotdb.db.protocol.client.ConfigNodeInfo;
import org.apache.iotdb.db.service.ResourcesInformationHolder;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache;
import org.apache.iotdb.pipe.api.exception.PipeException;
import org.apache.iotdb.rpc.TSStatusCode;
@@ -161,7 +160,6 @@
try (final ConfigNodeClient configNodeClient =
ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
final TGetAllPipeInfoResp getAllPipeInfoResp = configNodeClient.getAllPipeInfo();
- WALInsertNodeCache.init();
PipeTabletEventBatch.init();
if (getAllPipeInfoResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
throw new StartupException("Failed to get pipe task meta from config node.");
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
index 5a1f9d8..afb05fc 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java
@@ -35,7 +35,6 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
-import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent;
import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
@@ -86,15 +85,6 @@
PipeConfig.getInstance().printAllConfigs();
PipeAgentLauncher.launchPipeTaskAgent();
- registerPeriodicalJob(
- "PipeTaskAgent#restartAllStuckPipes",
- PipeDataNodeAgent.task()::restartAllStuckPipes,
- PipeConfig.getInstance().getPipeStuckRestartIntervalSeconds());
- registerPeriodicalJob(
- "PipeTaskAgent#flushDataRegionIfNeeded",
- PipeTerminateEvent::flushDataRegionIfNeeded,
- PipeConfig.getInstance().getPipeFlushAfterLastTerminateSeconds());
-
pipePeriodicalJobExecutor.start();
if (PipeConfig.getInstance().getPipeEventReferenceTrackingEnabled()) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
index ef0d4c6..83854f2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java
@@ -19,6 +19,10 @@
package org.apache.iotdb.db.pipe.agent.task;
+import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
+import org.apache.iotdb.commons.concurrent.ThreadName;
+import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
+import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.consensus.DataRegionId;
import org.apache.iotdb.commons.consensus.SchemaRegionId;
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
@@ -34,8 +38,7 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant;
import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant;
-import org.apache.iotdb.commons.service.metric.MetricService;
-import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
import org.apache.iotdb.consensus.exception.ConsensusException;
import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeName;
import org.apache.iotdb.db.conf.IoTDBConfig;
@@ -45,22 +48,17 @@
import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeBuilder;
import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeTaskBuilder;
import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter;
-import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener;
import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningFilter;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
-import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeOperateSchemaQueueNode;
import org.apache.iotdb.db.schemaengine.SchemaEngine;
import org.apache.iotdb.db.storageengine.StorageEngine;
-import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager;
import org.apache.iotdb.db.subscription.agent.SubscriptionAgent;
-import org.apache.iotdb.metrics.utils.MetricLevel;
-import org.apache.iotdb.metrics.utils.SystemMetric;
import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp;
import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq;
import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatResp;
@@ -78,6 +76,7 @@
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -86,21 +85,52 @@
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_START_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATH_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATTERN_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_START_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_END_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_START_TIME_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_PATH_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_PATTERN_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_REALTIME_ENABLE_KEY;
+import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_START_TIME_KEY;
+
public class PipeDataNodeTaskAgent extends PipeTaskAgent {
private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeTaskAgent.class);
protected static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig();
- private static final AtomicLong LAST_FORCED_RESTART_TIME =
- new AtomicLong(System.currentTimeMillis());
- private static final Map<String, AtomicLong> PIPE_NAME_TO_LAST_RESTART_TIME_MAP =
- new ConcurrentHashMap<>();
+ private final ExecutorService pipeExecutor =
+ new WrappedThreadPoolExecutor(
+ 0,
+ IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount(),
+ 0L,
+ TimeUnit.SECONDS,
+ new ArrayBlockingQueue<>(
+ IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount()),
+ new IoTThreadFactory(ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName()),
+ ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName(),
+ new ThreadPoolExecutor.CallerRunsPolicy());
////////////////////////// Pipe Task Management Entry //////////////////////////
@@ -115,54 +145,6 @@
return new PipeDataNodeBuilder(pipeMetaFromConfigNode).build();
}
- ////////////////////////// Manage by Pipe Name //////////////////////////
-
- @Override
- protected void startPipe(final String pipeName, final long creationTime) {
- final PipeMeta existedPipeMeta = pipeMetaKeeper.getPipeMeta(pipeName);
- final PipeStatus status = existedPipeMeta.getRuntimeMeta().getStatus().get();
- if (PipeStatus.STOPPED.equals(status) || status == null) {
- restartPipeToReloadResourceIfNeeded(existedPipeMeta);
- }
-
- super.startPipe(pipeName, creationTime);
- }
-
- private void restartPipeToReloadResourceIfNeeded(final PipeMeta pipeMeta) {
- if (System.currentTimeMillis() - pipeMeta.getStaticMeta().getCreationTime()
- < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs()) {
- return;
- }
-
- final AtomicLong lastRestartTime =
- PIPE_NAME_TO_LAST_RESTART_TIME_MAP.get(pipeMeta.getStaticMeta().getPipeName());
- if (lastRestartTime != null
- && System.currentTimeMillis() - lastRestartTime.get()
- < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs()) {
- LOGGER.info(
- "Skipping reload resource for stopped pipe {} before starting it because reloading resource is too frequent.",
- pipeMeta.getStaticMeta().getPipeName());
- return;
- }
-
- if (PIPE_NAME_TO_LAST_RESTART_TIME_MAP.isEmpty()) {
- LOGGER.info(
- "Flushing storage engine before restarting pipe {}.",
- pipeMeta.getStaticMeta().getPipeName());
- final long currentTime = System.currentTimeMillis();
- StorageEngine.getInstance().syncCloseAllProcessor();
- WALManager.getInstance().syncDeleteOutdatedFilesInWALNodes();
- LOGGER.info(
- "Finished flushing storage engine, time cost: {} ms.",
- System.currentTimeMillis() - currentTime);
- }
-
- restartStuckPipe(pipeMeta);
- LOGGER.info(
- "Reloaded resource for stopped pipe {} before starting it.",
- pipeMeta.getStaticMeta().getPipeName());
- }
-
///////////////////////// Manage by regionGroupId /////////////////////////
@Override
@@ -306,13 +288,12 @@
@Override
protected void thawRate(final String pipeName, final long creationTime) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().thawRate(pipeName + "_" + creationTime);
+ PipeDataNodeSinglePipeMetrics.getInstance().thawRate(pipeName + "_" + creationTime);
}
@Override
protected void freezeRate(final String pipeName, final long creationTime) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
- .freezeRate(pipeName + "_" + creationTime);
+ PipeDataNodeSinglePipeMetrics.getInstance().freezeRate(pipeName + "_" + creationTime);
}
@Override
@@ -323,7 +304,7 @@
final String taskId = pipeName + "_" + creationTime;
PipeTsFileToTabletsMetrics.getInstance().deregister(taskId);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().deregister(taskId);
+ PipeDataNodeSinglePipeMetrics.getInstance().deregister(taskId);
return true;
}
@@ -351,7 +332,7 @@
final long creationTime = pipeMeta.getStaticMeta().getCreationTime();
final String taskId = pipeName + "_" + creationTime;
PipeTsFileToTabletsMetrics.getInstance().deregister(taskId);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().deregister(taskId);
+ PipeDataNodeSinglePipeMetrics.getInstance().deregister(taskId);
// When the pipe contains no pipe tasks, there is no corresponding prefetching queue for the
// subscribed pipe, so the subscription needs to be manually marked as completed.
if (!hasPipeTasks && PipeStaticMeta.isSubscriptionPipe(pipeName)) {
@@ -379,9 +360,8 @@
///////////////////////// Heartbeat /////////////////////////
public void collectPipeMetaList(final TDataNodeHeartbeatResp resp) throws TException {
- // Try the lock instead of directly acquire it to prevent the block of the cluster heartbeat
- // 10s is the half of the HEARTBEAT_TIMEOUT_TIME defined in class BaseNodeCache in ConfigNode
- if (!tryReadLockWithTimeOut(10)) {
+ if (!tryReadLockWithTimeOut(
+ CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) {
return;
}
try {
@@ -445,7 +425,7 @@
final boolean isCompleted = isAllDataRegionCompleted && includeDataAndNeedDrop;
final Pair<Long, Double> remainingEventAndTime =
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.getRemainingEventAndTime(staticMeta.getPipeName(), staticMeta.getCreationTime());
pipeCompletedList.add(isCompleted);
pipeRemainingEventCountList.add(remainingEventAndTime.getLeft());
@@ -528,7 +508,7 @@
final boolean isCompleted = isAllDataRegionCompleted && includeDataAndNeedDrop;
final Pair<Long, Double> remainingEventAndTime =
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.getRemainingEventAndTime(staticMeta.getPipeName(), staticMeta.getCreationTime());
pipeCompletedList.add(isCompleted);
pipeRemainingEventCountList.add(remainingEventAndTime.getLeft());
@@ -554,226 +534,6 @@
PipeInsertionDataNodeListener.getInstance().listenToHeartbeat(true);
}
- ///////////////////////// Restart Logic /////////////////////////
-
- public void restartAllStuckPipes() {
- final List<String> removedPipeName = removeOutdatedPipeInfoFromLastRestartTimeMap();
- if (!removedPipeName.isEmpty()) {
- final long currentTime = System.currentTimeMillis();
- LOGGER.info(
- "Pipes {} now can dynamically adjust their extraction strategies. "
- + "Start to flush storage engine to trigger the adjustment.",
- removedPipeName);
- StorageEngine.getInstance().syncCloseAllProcessor();
- WALManager.getInstance().syncDeleteOutdatedFilesInWALNodes();
- LOGGER.info(
- "Finished flushing storage engine, time cost: {} ms.",
- System.currentTimeMillis() - currentTime);
- LOGGER.info("Skipping restarting pipes this round because of the dynamic flushing.");
- return;
- }
-
- if (!tryWriteLockWithTimeOut(5)) {
- return;
- }
-
- final Set<PipeMeta> stuckPipes;
- try {
- stuckPipes = findAllStuckPipes();
- } finally {
- releaseWriteLock();
- }
-
- // If the pipe has been restarted recently, skip it.
- stuckPipes.removeIf(
- pipeMeta -> {
- final AtomicLong lastRestartTime =
- PIPE_NAME_TO_LAST_RESTART_TIME_MAP.get(pipeMeta.getStaticMeta().getPipeName());
- return lastRestartTime != null
- && System.currentTimeMillis() - lastRestartTime.get()
- < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs();
- });
-
- // Restart all stuck pipes.
- // Note that parallelStream cannot be used here. The method PipeTaskAgent#dropPipe also uses
- // parallelStream. If parallelStream is used here, the subtasks generated inside the dropPipe
- // may not be scheduled by the worker thread of ForkJoinPool because of less available threads,
- // and the parent task will wait for the completion of the subtasks in ForkJoinPool forever,
- // causing the deadlock.
- stuckPipes.forEach(this::restartStuckPipe);
- }
-
- private List<String> removeOutdatedPipeInfoFromLastRestartTimeMap() {
- final List<String> removedPipeName = new ArrayList<>();
- PIPE_NAME_TO_LAST_RESTART_TIME_MAP
- .entrySet()
- .removeIf(
- entry -> {
- final AtomicLong lastRestartTime = entry.getValue();
- final boolean shouldRemove =
- lastRestartTime == null
- || PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs()
- <= System.currentTimeMillis() - lastRestartTime.get();
- if (shouldRemove) {
- removedPipeName.add(entry.getKey());
- }
- return shouldRemove;
- });
- return removedPipeName;
- }
-
- private Set<PipeMeta> findAllStuckPipes() {
- final Set<PipeMeta> stuckPipes = new HashSet<>();
-
- if (System.currentTimeMillis() - LAST_FORCED_RESTART_TIME.get()
- > PipeConfig.getInstance().getPipeSubtaskExecutorForcedRestartIntervalMs()) {
- LAST_FORCED_RESTART_TIME.set(System.currentTimeMillis());
- for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) {
- stuckPipes.add(pipeMeta);
- }
- if (!stuckPipes.isEmpty()) {
- LOGGER.warn(
- "All {} pipe(s) will be restarted because of forced restart policy.",
- stuckPipes.size());
- }
- return stuckPipes;
- }
-
- final long totalLinkedButDeletedTsFileResourceRamSize =
- PipeDataNodeResourceManager.tsfile().getTotalLinkedButDeletedTsFileResourceRamSize();
- final long totalInsertNodeFloatingMemoryUsageInBytes = getAllFloatingMemoryUsageInByte();
- final long totalFloatingMemorySizeInBytes =
- PipeMemoryManager.getTotalFloatingMemorySizeInBytes();
- if (totalInsertNodeFloatingMemoryUsageInBytes + totalLinkedButDeletedTsFileResourceRamSize
- >= totalFloatingMemorySizeInBytes) {
- for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) {
- stuckPipes.add(pipeMeta);
- }
- if (!stuckPipes.isEmpty()) {
- LOGGER.warn(
- "All {} pipe(s) will be restarted because linked but deleted tsFiles' resource size {} and all insertNode's size {} exceeds limit {}.",
- stuckPipes.size(),
- totalLinkedButDeletedTsFileResourceRamSize,
- totalInsertNodeFloatingMemoryUsageInBytes,
- totalFloatingMemorySizeInBytes);
- }
- return stuckPipes;
- }
-
- final Map<String, IoTDBDataRegionExtractor> taskId2ExtractorMap =
- PipeDataRegionExtractorMetrics.getInstance().getExtractorMap();
- for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) {
- final String pipeName = pipeMeta.getStaticMeta().getPipeName();
- final List<IoTDBDataRegionExtractor> extractors =
- taskId2ExtractorMap.values().stream()
- .filter(e -> e.getPipeName().equals(pipeName) && e.shouldExtractInsertion())
- .collect(Collectors.toList());
-
- if (extractors.isEmpty()) {
- continue;
- }
-
- // Extractors of this pipe might not pin too much MemTables,
- // still need to check if linked-and-deleted TsFile count exceeds limit.
- // Typically, if deleted tsFiles are too abundant all pipes may need to restart.
- if ((CONFIG.isEnableSeqSpaceCompaction()
- || CONFIG.isEnableUnseqSpaceCompaction()
- || CONFIG.isEnableCrossSpaceCompaction())
- && mayDeletedTsFileSizeReachDangerousThreshold()) {
- LOGGER.warn(
- "Pipe {} needs to restart because too many TsFiles are out-of-date.",
- pipeMeta.getStaticMeta());
- stuckPipes.add(pipeMeta);
- continue;
- }
-
- // Try to restart the stream mode pipes for releasing memTables.
- if (extractors.get(0).isStreamMode()) {
- if (extractors.stream().anyMatch(IoTDBDataRegionExtractor::hasConsumedAllHistoricalTsFiles)
- && (mayMemTablePinnedCountReachDangerousThreshold()
- || mayWalSizeReachThrottleThreshold())) {
- // Extractors of this pipe may be stuck and is pinning too many MemTables.
- LOGGER.warn(
- "Pipe {} needs to restart because too many memTables are pinned or the WAL size is too large. mayMemTablePinnedCountReachDangerousThreshold: {}, mayWalSizeReachThrottleThreshold: {}",
- pipeMeta.getStaticMeta(),
- mayMemTablePinnedCountReachDangerousThreshold(),
- mayWalSizeReachThrottleThreshold());
- stuckPipes.add(pipeMeta);
- }
- }
- }
-
- return stuckPipes;
- }
-
- private boolean mayDeletedTsFileSizeReachDangerousThreshold() {
- try {
- final long linkedButDeletedTsFileSize =
- PipeDataNodeResourceManager.tsfile().getTotalLinkedButDeletedTsfileSize();
- final double totalDisk =
- MetricService.getInstance()
- .getAutoGauge(
- SystemMetric.SYS_DISK_TOTAL_SPACE.toString(),
- MetricLevel.CORE,
- Tag.NAME.toString(),
- // This "system" should stay the same with the one in
- // DataNodeInternalRPCServiceImpl.
- "system")
- .getValue();
- return linkedButDeletedTsFileSize > 0
- && totalDisk > 0
- && linkedButDeletedTsFileSize
- > PipeConfig.getInstance().getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage()
- * totalDisk;
- } catch (final Exception e) {
- LOGGER.warn("Failed to judge if deleted TsFile size reaches dangerous threshold.", e);
- return false;
- }
- }
-
- private boolean mayMemTablePinnedCountReachDangerousThreshold() {
- return PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() != Integer.MAX_VALUE
- && PipeDataNodeResourceManager.wal().getPinnedWalCount()
- >= 5
- * PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount()
- * StorageEngine.getInstance().getDataRegionNumber();
- }
-
- private boolean mayWalSizeReachThrottleThreshold() {
- return 3 * WALManager.getInstance().getTotalDiskUsage() > 2 * CONFIG.getThrottleThreshold();
- }
-
- private void restartStuckPipe(final PipeMeta pipeMeta) {
- LOGGER.warn(
- "Pipe {} will be restarted because it is stuck or has encountered issues such as data backlog or being stopped for too long.",
- pipeMeta.getStaticMeta());
- acquireWriteLock();
- try {
- final long startTime = System.currentTimeMillis();
- final PipeMeta originalPipeMeta = pipeMeta.deepCopy4TaskAgent();
- handleDropPipe(pipeMeta.getStaticMeta().getPipeName());
-
- final long restartTime = System.currentTimeMillis();
- PIPE_NAME_TO_LAST_RESTART_TIME_MAP
- .computeIfAbsent(pipeMeta.getStaticMeta().getPipeName(), k -> new AtomicLong(restartTime))
- .set(restartTime);
- handleSinglePipeMetaChanges(originalPipeMeta);
-
- LOGGER.warn(
- "Pipe {} was restarted because of stuck or data backlog, time cost: {} ms.",
- originalPipeMeta.getStaticMeta(),
- System.currentTimeMillis() - startTime);
- } catch (final Exception e) {
- LOGGER.warn("Failed to restart stuck pipe {}.", pipeMeta.getStaticMeta(), e);
- } finally {
- releaseWriteLock();
- }
- }
-
- public boolean isPipeTaskCurrentlyRestarted(final String pipeName) {
- return PIPE_NAME_TO_LAST_RESTART_TIME_MAP.containsKey(pipeName);
- }
-
///////////////////////// Terminate Logic /////////////////////////
public void markCompleted(final String pipeName, final int regionId) {
@@ -817,6 +577,48 @@
}
}
+ @Override
+ public void runPipeTasks(
+ final Collection<PipeTask> pipeTasks, final Consumer<PipeTask> runSingle) {
+ final Set<Future<?>> pipeFuture = new HashSet<>();
+
+ pipeTasks.forEach(
+ pipeTask -> pipeFuture.add(pipeExecutor.submit(() -> runSingle.accept(pipeTask))));
+
+ for (final Future<?> future : pipeFuture) {
+ try {
+ future.get();
+ } catch (final ExecutionException | InterruptedException e) {
+ LOGGER.warn("Exception occurs when executing pipe task: ", e);
+ throw new PipeException(e.toString());
+ }
+ }
+ }
+
+ ///////////////////////// Shutdown Logic /////////////////////////
+
+ public void persistAllProgressIndexLocally() {
+ if (!PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()) {
+ LOGGER.info(
+ "Pipe progress index persist disabled. Skipping persist all progress index locally.");
+ return;
+ }
+ if (!tryReadLockWithTimeOut(10)) {
+ LOGGER.info("Failed to persist all progress index locally because of timeout.");
+ return;
+ }
+ try {
+ for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) {
+ pipeMeta.getRuntimeMeta().persistProgressIndex();
+ }
+ LOGGER.info("Persist all progress index locally successfully.");
+ } catch (final Exception e) {
+ LOGGER.warn("Failed to record all progress index locally, because {}.", e.getMessage(), e);
+ } finally {
+ releaseReadLock();
+ }
+ }
+
///////////////////////// Pipe Consensus /////////////////////////
public ProgressIndex getPipeTaskProgressIndex(final String pipeName, final int consensusGroupId) {
@@ -859,4 +661,203 @@
releaseReadLock();
}
}
+
+ @Override
+ protected void calculateMemoryUsage(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+ if (!PipeConfig.getInstance().isPipeEnableMemoryCheck()) {
+ return;
+ }
+
+ calculateInsertNodeQueueMemory(extractorParameters, processorParameters, connectorParameters);
+
+ long needMemory = 0;
+
+ needMemory +=
+ calculateTsFileParserMemory(extractorParameters, processorParameters, connectorParameters);
+ needMemory +=
+ calculateSinkBatchMemory(extractorParameters, processorParameters, connectorParameters);
+ needMemory +=
+ calculateSendTsFileReadBufferMemory(
+ extractorParameters, processorParameters, connectorParameters);
+
+ PipeMemoryManager pipeMemoryManager = PipeDataNodeResourceManager.memory();
+ final long freeMemorySizeInBytes = pipeMemoryManager.getFreeMemorySizeInBytes();
+ final long reservedMemorySizeInBytes =
+ (long)
+ (PipeMemoryManager.getTotalMemorySizeInBytes()
+ * PipeConfig.getInstance().getReservedMemoryPercentage());
+ if (freeMemorySizeInBytes < needMemory + reservedMemorySizeInBytes) {
+ final String message =
+ String.format(
+ "Not enough memory for pipe. Need memory: %d bytes, free memory: %d bytes, reserved memory: %d bytes, total memory: %d bytes",
+ needMemory,
+ freeMemorySizeInBytes,
+ reservedMemorySizeInBytes,
+ PipeMemoryManager.getTotalMemorySizeInBytes());
+ LOGGER.warn(message);
+ throw new PipeException(message);
+ }
+ }
+
+ private void calculateInsertNodeQueueMemory(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+
+ // Realtime extractor is enabled by default, so we only need to check the source realtime
+ if (!extractorParameters.getBooleanOrDefault(
+ Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY),
+ EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE)) {
+ return;
+ }
+
+ // If the realtime mode is batch or file, we do not need to allocate memory
+ final String realtimeMode =
+ extractorParameters.getStringByKeys(
+ PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_KEY,
+ PipeExtractorConstant.SOURCE_REALTIME_MODE_KEY);
+ if (PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE.equals(realtimeMode)
+ || PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_FILE_VALUE.equals(realtimeMode)) {
+ return;
+ }
+
+ final long allocatedMemorySizeInBytes = this.getAllFloatingMemoryUsageInByte();
+ final long remainingMemory =
+ PipeMemoryManager.getTotalFloatingMemorySizeInBytes() - allocatedMemorySizeInBytes;
+ if (remainingMemory < PipeConfig.getInstance().PipeInsertNodeQueueMemory()) {
+ final String message =
+ String.format(
+ "Not enough memory for pipe. Need Floating memory: %d bytes, free Floating memory: %d bytes",
+ PipeConfig.getInstance().PipeInsertNodeQueueMemory(), remainingMemory);
+ LOGGER.warn(message);
+ throw new PipeException(message);
+ }
+ }
+
+ private long calculateTsFileParserMemory(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+
+ // If the extractor is not history, we do not need to allocate memory
+ boolean isExtractorHistory =
+ extractorParameters.getBooleanOrDefault(
+ SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE)
+ || extractorParameters.getBooleanOrDefault(
+ Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY),
+ EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE);
+
+ // If the extractor is history, and has start/end time, we need to allocate memory
+ boolean isTSFileParser =
+ isExtractorHistory
+ && extractorParameters.hasAnyAttributes(
+ EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY);
+
+ isTSFileParser =
+ isTSFileParser
+ || (isExtractorHistory
+ && extractorParameters.hasAnyAttributes(
+ EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY));
+
+ // if the extractor has start/end time, we need to allocate memory
+ isTSFileParser =
+ isTSFileParser
+ || extractorParameters.hasAnyAttributes(
+ SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY);
+
+ isTSFileParser =
+ isTSFileParser
+ || extractorParameters.hasAnyAttributes(SOURCE_END_TIME_KEY, EXTRACTOR_END_TIME_KEY);
+
+ // If the extractor has pattern or path, we need to allocate memory
+ isTSFileParser =
+ isTSFileParser
+ || extractorParameters.hasAnyAttributes(EXTRACTOR_PATTERN_KEY, SOURCE_PATTERN_KEY);
+
+ isTSFileParser =
+ isTSFileParser || extractorParameters.hasAnyAttributes(EXTRACTOR_PATH_KEY, SOURCE_PATH_KEY);
+
+ // If the connector format is not hybrid, we need to allocate memory
+ isTSFileParser =
+ isTSFileParser
+ || !PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals(
+ connectorParameters.getStringOrDefault(
+ Arrays.asList(
+ PipeConnectorConstant.CONNECTOR_FORMAT_KEY,
+ PipeConnectorConstant.SINK_FORMAT_KEY),
+ PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE));
+
+ if (!isTSFileParser) {
+ return 0;
+ }
+
+ return PipeConfig.getInstance().getTsFileParserMemory();
+ }
+
+ private long calculateSinkBatchMemory(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+
+ // If the connector format is tsfile, we need to use batch
+ boolean needUseBatch =
+ PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals(
+ connectorParameters.getStringOrDefault(
+ Arrays.asList(
+ PipeConnectorConstant.CONNECTOR_FORMAT_KEY,
+ PipeConnectorConstant.SINK_FORMAT_KEY),
+ PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE));
+
+ if (needUseBatch) {
+ return PipeConfig.getInstance().getSinkBatchMemoryTsFile();
+ }
+
+ // If the connector is batch mode, we need to use batch
+ needUseBatch =
+ connectorParameters.getBooleanOrDefault(
+ Arrays.asList(
+ PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY,
+ PipeConnectorConstant.SINK_IOTDB_BATCH_MODE_ENABLE_KEY),
+ PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE);
+
+ if (!needUseBatch) {
+ return 0;
+ }
+
+ return PipeConfig.getInstance().getSinkBatchMemoryInsertNode();
+ }
+
+ private long calculateSendTsFileReadBufferMemory(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+ // If the extractor has history extraction enabled, we need to transfer tsfile
+ boolean needTransferTsFile =
+ extractorParameters.getBooleanOrDefault(
+ SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE)
+ || extractorParameters.getBooleanOrDefault(
+ Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY),
+ EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE);
+
+ String format =
+ connectorParameters.getStringOrDefault(
+ Arrays.asList(
+ PipeConnectorConstant.CONNECTOR_FORMAT_KEY, PipeConnectorConstant.SINK_FORMAT_KEY),
+ PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE);
+
+ // If the connector format is tsfile or hybrid, we need to transfer tsfile
+ needTransferTsFile =
+ needTransferTsFile
+ || PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals(format)
+ || PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals(format);
+
+ if (!needTransferTsFile) {
+ return 0;
+ }
+
+ return PipeConfig.getInstance().getSendTsFileReadBuffer();
+ }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java
index ecbbc64..6e4a858 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java
@@ -21,6 +21,7 @@
import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
import org.apache.iotdb.db.pipe.agent.task.execution.PipeConnectorSubtaskExecutor;
+import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.pipe.api.event.Event;
import org.slf4j.Logger;
@@ -65,6 +66,8 @@
if (registeredTaskCount == 0) {
executor.register(subtask);
runningTaskCount = 0;
+
+ PipeDataNodeResourceManager.compaction().registerPipeConnectorSubtaskLifeCycle(this);
}
registeredTaskCount++;
@@ -152,5 +155,7 @@
@Override
public synchronized void close() {
executor.deregister(subtask.getTaskID());
+
+ PipeDataNodeResourceManager.compaction().deregisterPipeConnectorSubtaskLifeCycle(this);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java
index a4f0544..5dc0c4b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java
@@ -21,23 +21,40 @@
import org.apache.iotdb.commons.pipe.agent.task.connection.BlockingPendingQueue;
import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
+import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeCompactedTsFileInsertionEvent;
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
import org.apache.iotdb.pipe.api.event.Event;
import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Objects;
+import java.util.Set;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
+import java.util.stream.Collectors;
public class PipeRealtimePriorityBlockingQueue extends UnboundedBlockingPendingQueue<Event> {
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PipeRealtimePriorityBlockingQueue.class);
+
private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance();
private final BlockingDeque<TsFileInsertionEvent> tsfileInsertEventDeque =
@@ -55,7 +72,7 @@
}
@Override
- public boolean directOffer(final Event event) {
+ public synchronized boolean directOffer(final Event event) {
checkBeforeOffer(event);
if (event instanceof TsFileInsertionEvent) {
@@ -73,18 +90,18 @@
}
@Override
- public boolean waitedOffer(final Event event) {
+ public synchronized boolean waitedOffer(final Event event) {
return directOffer(event);
}
@Override
- public boolean put(final Event event) {
+ public synchronized boolean put(final Event event) {
directOffer(event);
return true;
}
@Override
- public Event directPoll() {
+ public synchronized Event directPoll() {
Event event = null;
final int pollHistoricalTsFileThreshold =
PIPE_CONFIG.getPipeRealTimeQueuePollHistoricalTsFileThreshold();
@@ -129,7 +146,7 @@
* available.
*/
@Override
- public Event waitedPoll() {
+ public synchronized Event waitedPoll() {
Event event = null;
final int pollHistoricalTsFileThreshold =
PIPE_CONFIG.getPipeRealTimeQueuePollHistoricalTsFileThreshold();
@@ -176,7 +193,7 @@
}
@Override
- public Event peek() {
+ public synchronized Event peek() {
final Event event = pendingQueue.peek();
if (Objects.nonNull(event)) {
return event;
@@ -184,20 +201,135 @@
return tsfileInsertEventDeque.peek();
}
+ public synchronized void replace(
+ String dataRegionId, Set<TsFileResource> sourceFiles, List<TsFileResource> targetFiles) {
+ // TODO: return if pipe sink is for pipe consensus
+
+ final int regionId = Integer.parseInt(dataRegionId);
+ final Map<CommitterKey, Set<PipeTsFileInsertionEvent>> eventsToBeRemovedGroupByCommitterKey =
+ tsfileInsertEventDeque.stream()
+ .filter(
+ event ->
+ event instanceof PipeTsFileInsertionEvent
+ && ((PipeTsFileInsertionEvent) event).getRegionId() == regionId)
+ .map(event -> (PipeTsFileInsertionEvent) event)
+ .collect(
+ Collectors.groupingBy(
+ PipeTsFileInsertionEvent::getCommitterKey, Collectors.toSet()))
+ .entrySet()
+ .stream()
+ // Replace if all source files are present in the queue
+ .filter(entry -> entry.getValue().size() == sourceFiles.size())
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+ if (eventsToBeRemovedGroupByCommitterKey.isEmpty()) {
+ return;
+ }
+
+ final Map<CommitterKey, Set<PipeTsFileInsertionEvent>> eventsToBeAddedGroupByCommitterKey =
+ new HashMap<>();
+ for (final Map.Entry<CommitterKey, Set<PipeTsFileInsertionEvent>> entry :
+ eventsToBeRemovedGroupByCommitterKey.entrySet()) {
+ final CommitterKey committerKey = entry.getKey();
+ final PipeTsFileInsertionEvent anyEvent = entry.getValue().stream().findFirst().orElse(null);
+ final Set<PipeTsFileInsertionEvent> newEvents = new HashSet<>();
+ for (int i = 0; i < targetFiles.size(); i++) {
+ newEvents.add(
+ new PipeCompactedTsFileInsertionEvent(
+ committerKey,
+ entry.getValue(),
+ anyEvent,
+ targetFiles.get(i),
+ i == targetFiles.size() - 1));
+ }
+ eventsToBeAddedGroupByCommitterKey.put(committerKey, newEvents);
+ }
+
+ // Handling new events
+ final Set<PipeTsFileInsertionEvent> successfullyReferenceIncreasedEvents = new HashSet<>();
+ final AtomicBoolean
+ allSuccess = // To track if all events successfully increased the reference count
+ new AtomicBoolean(true);
+ outerLoop:
+ for (final Map.Entry<CommitterKey, Set<PipeTsFileInsertionEvent>> committerKeySetEntry :
+ eventsToBeAddedGroupByCommitterKey.entrySet()) {
+ for (final PipeTsFileInsertionEvent event : committerKeySetEntry.getValue()) {
+ if (event != null) {
+ try {
+ if (!event.increaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName())) {
+ allSuccess.set(false);
+ break outerLoop;
+ } else {
+ successfullyReferenceIncreasedEvents.add(event);
+ }
+ } catch (final Exception e) {
+ allSuccess.set(false);
+ break outerLoop;
+ }
+ }
+ }
+ }
+ if (!allSuccess.get()) {
+ // If any event failed to increase the reference count,
+ // we need to decrease the reference count for all successfully increased events
+ for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) {
+ try {
+ event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue",
+ event,
+ e);
+ }
+ }
+      return; // Exit early: at least one event failed to increase its reference count
+ } else {
+ // If all events successfully increased reference count,
+ // we can proceed to add them to the deque
+ for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) {
+ tsfileInsertEventDeque.add(event);
+ eventCounter.increaseEventCount(event);
+ }
+ }
+
+ // Handling old events
+ for (final Map.Entry<CommitterKey, Set<PipeTsFileInsertionEvent>> entry :
+ eventsToBeRemovedGroupByCommitterKey.entrySet()) {
+ for (final PipeTsFileInsertionEvent event : entry.getValue()) {
+ if (event != null) {
+ try {
+ event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false);
+ } catch (final Exception e) {
+ LOGGER.warn(
+ "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue",
+ event,
+ e);
+ }
+ eventCounter.decreaseEventCount(event);
+ }
+ }
+ }
+ final Set<PipeTsFileInsertionEvent> eventsToRemove = new HashSet<>();
+ for (Set<PipeTsFileInsertionEvent> pipeTsFileInsertionEvents :
+ eventsToBeRemovedGroupByCommitterKey.values()) {
+ eventsToRemove.addAll(pipeTsFileInsertionEvents);
+ }
+ tsfileInsertEventDeque.removeIf(eventsToRemove::contains);
+ }
+
@Override
- public void clear() {
+ public synchronized void clear() {
super.clear();
tsfileInsertEventDeque.clear();
}
@Override
- public void forEach(final Consumer<? super Event> action) {
+ public synchronized void forEach(final Consumer<? super Event> action) {
super.forEach(action);
tsfileInsertEventDeque.forEach(action);
}
@Override
- public void discardAllEvents() {
+ public synchronized void discardAllEvents() {
super.discardAllEvents();
tsfileInsertEventDeque.removeIf(
event -> {
@@ -212,7 +344,7 @@
}
@Override
- public void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
+ public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
super.discardEventsOfPipe(pipeNameToDrop, regionId);
tsfileInsertEventDeque.removeIf(
event -> {
@@ -244,7 +376,7 @@
return tsfileInsertEventDeque.size();
}
- public void setOfferTsFileCounter(AtomicInteger offerTsFileCounter) {
+ public synchronized void setOfferTsFileCounter(AtomicInteger offerTsFileCounter) {
this.offerTsFileCounter = offerTsFileCounter;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
index c8a0acf..d39feb9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/processor/PipeProcessorSubtask.java
@@ -31,7 +31,7 @@
import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector;
import org.apache.iotdb.db.pipe.event.UserDefinedEnrichedEvent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.processor.PipeProcessorMetrics;
import org.apache.iotdb.db.pipe.processor.pipeconsensus.PipeConsensusProcessor;
import org.apache.iotdb.db.storageengine.StorageEngine;
@@ -140,7 +140,7 @@
} else if (event instanceof TsFileInsertionEvent) {
pipeProcessor.process((TsFileInsertionEvent) event, outputEventCollector);
PipeProcessorMetrics.getInstance().markTsFileEvent(taskID);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.markTsFileCollectInvocationCount(
pipeNameWithCreationTime, outputEventCollector.getCollectInvocationCount());
} else if (event instanceof PipeHeartbeatEvent) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java
index 1cb9f50..78a237a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java
@@ -22,6 +22,7 @@
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.PipeDynamicMemoryBlock;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlockType;
import org.apache.iotdb.db.pipe.resource.memory.PipeModelFixedMemoryBlock;
import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
@@ -47,7 +48,7 @@
private long firstEventProcessingTime = Long.MIN_VALUE;
protected long totalBufferSize = 0;
- private final PipeModelFixedMemoryBlock allocatedMemoryBlock;
+ private final PipeDynamicMemoryBlock allocatedMemoryBlock;
protected volatile boolean isClosed = false;
@@ -60,11 +61,9 @@
// limit in buffer size
this.allocatedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateForModelFixedMemoryBlock(
- requestMaxBatchSizeInBytes, PipeMemoryBlockType.BATCH);
+ pipeModelFixedMemoryBlock.registerPipeBatchMemoryBlock(requestMaxBatchSizeInBytes);
- if (getMaxBatchSizeInBytes() != requestMaxBatchSizeInBytes) {
+ if (getMaxBatchSizeInBytes() != allocatedMemoryBlock.getMemoryUsageInBytes()) {
LOGGER.info(
"PipeTabletEventBatch: the max batch size is adjusted from {} to {} due to the "
+ "memory restriction",
@@ -127,8 +126,12 @@
throws WALPipeException, IOException;
public boolean shouldEmit() {
- return totalBufferSize >= getMaxBatchSizeInBytes()
- || System.currentTimeMillis() - firstEventProcessingTime >= maxDelayInMs;
+ final long diff = System.currentTimeMillis() - firstEventProcessingTime;
+ if (totalBufferSize >= getMaxBatchSizeInBytes() || diff >= maxDelayInMs) {
+ allocatedMemoryBlock.updateCurrentMemoryEfficiencyAdjustMem((double) diff / maxDelayInMs);
+ return true;
+ }
+ return false;
}
private long getMaxBatchSizeInBytes() {
@@ -194,9 +197,22 @@
}
try {
- pipeModelFixedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateForModelFixedMemoryBlock(0L, PipeMemoryBlockType.BATCH);
+ long batchSize = PipeDataNodeResourceManager.memory().getAllocatedMemorySizeInBytesOfBatch();
+ for (long i = batchSize; i > 0; i = i / 2) {
+ try {
+ pipeModelFixedMemoryBlock =
+ PipeDataNodeResourceManager.memory()
+ .forceAllocateForModelFixedMemoryBlock(i, PipeMemoryBlockType.BATCH);
+
+ LOGGER.info("pipe model fixed memory block initialized with size: {} bytes", i);
+ return;
+ } catch (Exception ignore) {
+ // ignore the exception and try to allocate a smaller size
+ LOGGER.info(
+ "pipe model fixed memory block initialized with size: {} bytes failed, try smaller size",
+ i);
+ }
+ }
} catch (Exception e) {
LOGGER.error("init pipe model fixed memory block failed", e);
// If the allocation fails, we still need to create a default memory block to avoid NPE.
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java
index 292fc01..33f228f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java
@@ -102,8 +102,7 @@
(PipeInsertNodeTabletInsertionEvent) event;
// Read the bytebuffer from the wal file and transfer it directly without serializing or
// deserializing if possible
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
if (Objects.isNull(insertNode)) {
buffer = pipeInsertNodeTabletInsertionEvent.getByteBuffer();
binaryBuffers.add(buffer);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java
index 580dbe2..553a04c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java
@@ -66,7 +66,6 @@
planNodeReq.version = transferReq.version;
planNodeReq.type = transferReq.type;
- planNodeReq.body = transferReq.body;
return planNodeReq;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java
index 8090f65..27664cd 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java
@@ -182,7 +182,6 @@
batchReq.version = transferReq.version;
batchReq.type = transferReq.type;
- batchReq.body = transferReq.body;
return batchReq;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java
index 5e9e0a3..4deac8a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java
@@ -89,7 +89,6 @@
binaryReq.version = transferReq.version;
binaryReq.type = transferReq.type;
- binaryReq.body = transferReq.body;
return binaryReq;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
index c45417b..376ca12 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java
@@ -96,7 +96,6 @@
insertNodeReq.version = transferReq.version;
insertNodeReq.type = transferReq.type;
- insertNodeReq.body = transferReq.body;
return insertNodeReq;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java
index 61790f8..f1e3278 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java
@@ -132,7 +132,6 @@
tabletReq.version = transferReq.version;
tabletReq.type = transferReq.type;
- tabletReq.body = transferReq.body;
return tabletReq;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java
index ee35ab4..bc4805e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java
@@ -168,8 +168,7 @@
final AirGapSocket socket,
final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent)
throws PipeException, WALPipeException, IOException {
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
final byte[] bytes =
Objects.isNull(insertNode)
? PipeTransferTabletBinaryReq.toTPipeTransferBytes(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java
index a74057c..1756da3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java
@@ -277,8 +277,7 @@
return;
}
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
final ProgressIndex progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex();
final TPipeConsensusTransferReq pipeConsensusTransferReq =
Objects.isNull(insertNode)
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java
index f469078..6186d9f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java
@@ -252,7 +252,7 @@
try (final SyncPipeConsensusServiceClient syncPipeConsensusServiceClient =
syncRetryClientManager.borrowClient(getFollowerUrl())) {
- insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex();
if (insertNode != null) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java
index 870cea0..a3f8fe5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java
@@ -196,8 +196,7 @@
// Read the bytebuffer from the wal file and transfer it directly without serializing or
// deserializing if possible
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
// PipeConsensus will transfer binary data to TPipeConsensusTransferReq
final ProgressIndex progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex();
if (Objects.isNull(insertNode)) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java
index fd80f47..62871f0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java
@@ -265,8 +265,7 @@
return false;
}
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
final TPipeTransferReq pipeTransferReq =
compressIfNeeded(
Objects.isNull(insertNode)
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
index f77d41b..b3eb223 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java
@@ -104,7 +104,7 @@
? ((EnrichedEvent) event).coreReportMessage()
: event.toString(),
event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitterKey() : null,
- event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitId() : null,
+ event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitIds() : null,
exception);
} finally {
connector.addFailureEventToRetryQueue(event);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
index ccce6ba..1c586ce 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java
@@ -285,7 +285,7 @@
"Successfully transferred file {} (committer key={}, commit id={}, reference count={}).",
tsFile,
events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()),
- events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()),
+ events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()),
referenceCount);
} else {
LOGGER.info(
@@ -350,7 +350,7 @@
"Failed to transfer TsFileInsertionEvent {} (committer key {}, commit id {}).",
tsFile,
events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()),
- events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()),
+ events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()),
exception);
} else {
LOGGER.warn(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java
index 784ee14..d996cac 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java
@@ -294,8 +294,7 @@
// getDeviceId() may return null for InsertRowsNode, will be equal to getClient(null)
clientAndStatus = clientManager.getClient(pipeInsertNodeTabletInsertionEvent.getDeviceId());
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
final TPipeTransferReq req =
compressIfNeeded(
insertNode != null
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java
index 7d28f86..365950c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java
@@ -129,8 +129,7 @@
throws PipeException, WALPipeException {
final TSStatus status;
- final InsertNode insertNode =
- pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode();
if (Objects.isNull(insertNode)) {
status =
PipeDataNodeAgent.receiver()
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
index 19c3b0c..6860847 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java
@@ -25,7 +25,7 @@
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeHeartbeatEventMetrics;
import org.apache.iotdb.db.utils.DateTimeUtils;
import org.apache.iotdb.pipe.api.event.Event;
@@ -43,7 +43,6 @@
private final String dataRegionId;
private long timePublished;
- private long timeAssigned;
private long timeProcessed;
private long timeTransferred;
@@ -83,7 +82,7 @@
@Override
public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.increaseHeartbeatEventCount(pipeName, creationTime);
}
return true;
@@ -94,7 +93,7 @@
// PipeName == null indicates that the event is the raw event at disruptor,
// not the event copied and passed to the extractor
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.decreaseHeartbeatEventCount(pipeName, creationTime);
if (shouldPrintMessage && LOGGER.isDebugEnabled()) {
LOGGER.debug(this.toString());
@@ -151,22 +150,12 @@
}
}
- public void onAssigned() {
- if (shouldPrintMessage) {
- timeAssigned = System.currentTimeMillis();
- if (timePublished != 0) {
- PipeHeartbeatEventMetrics.getInstance()
- .recordPublishedToAssignedTime(timeAssigned - timePublished);
- }
- }
- }
-
public void onProcessed() {
if (shouldPrintMessage) {
timeProcessed = System.currentTimeMillis();
- if (timeAssigned != 0) {
+ if (timePublished != 0) {
PipeHeartbeatEventMetrics.getInstance()
- .recordAssignedToProcessedTime(timeProcessed - timeAssigned);
+ .recordAssignedToProcessedTime(timeProcessed - timePublished);
}
}
}
@@ -223,13 +212,9 @@
(timePublished != 0)
? DateTimeUtils.convertLongToDate(timePublished, "ms")
: unknownMessage;
- final String publishedToAssignedMessage =
- (timeAssigned != 0 && timePublished != 0)
- ? (timeAssigned - timePublished) + "ms"
- : unknownMessage;
final String assignedToProcessedMessage =
- (timeProcessed != 0 && timeAssigned != 0)
- ? (timeProcessed - timeAssigned) + "ms"
+ (timeProcessed != 0 && timePublished != 0)
+ ? (timeProcessed - timePublished) + "ms"
: unknownMessage;
final String processedToTransferredMessage =
(timeTransferred != 0 && timeProcessed != 0)
@@ -243,11 +228,11 @@
final String disruptorSizeMessage = Integer.toString(disruptorSize);
final String extractorQueueTabletSizeMessage =
- timeAssigned != 0 ? Integer.toString(extractorQueueTabletSize) : unknownMessage;
+ timePublished != 0 ? Integer.toString(extractorQueueTabletSize) : unknownMessage;
final String extractorQueueTsFileSizeMessage =
- timeAssigned != 0 ? Integer.toString(extractorQueueTsFileSize) : unknownMessage;
+ timePublished != 0 ? Integer.toString(extractorQueueTsFileSize) : unknownMessage;
final String extractorQueueSizeMessage =
- timeAssigned != 0 ? Integer.toString(extractorQueueSize) : unknownMessage;
+ timePublished != 0 ? Integer.toString(extractorQueueSize) : unknownMessage;
final String connectorQueueTabletSizeMessage =
timeProcessed != 0 ? Integer.toString(connectorQueueTabletSize) : unknownMessage;
@@ -263,9 +248,7 @@
+ dataRegionId
+ ", startTime="
+ startTimeMessage
- + ", publishedToAssigned="
- + publishedToAssignedMessage
- + ", assignedToProcessed="
+ + ", publishedToProcessed="
+ assignedToProcessedMessage
+ ", processedToTransferred="
+ processedToTransferredMessage
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
index e781491..6951a02 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java
@@ -28,8 +28,9 @@
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager.PipeEventResource;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.InsertNodeMemoryEstimator;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
import org.apache.iotdb.db.pipe.resource.memory.PipeTabletMemoryBlock;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
@@ -37,8 +38,6 @@
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode;
import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryPosition;
import org.apache.iotdb.pipe.api.access.Row;
import org.apache.iotdb.pipe.api.collector.RowCollector;
import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
@@ -69,52 +68,26 @@
LoggerFactory.getLogger(PipeInsertNodeTabletInsertionEvent.class);
private static final long INSTANCE_SIZE =
RamUsageEstimator.shallowSizeOfInstance(PipeInsertNodeTabletInsertionEvent.class)
- + RamUsageEstimator.shallowSizeOfInstance(WALEntryHandler.class)
- + RamUsageEstimator.shallowSizeOfInstance(WALEntryPosition.class)
+ RamUsageEstimator.shallowSizeOfInstance(AtomicInteger.class)
+ RamUsageEstimator.shallowSizeOfInstance(AtomicBoolean.class);
- private final WALEntryHandler walEntryHandler;
- private final boolean isAligned;
- private final boolean isGeneratedByPipe;
-
private final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock;
private volatile List<Tablet> tablets;
private List<TabletInsertionDataContainer> dataContainers;
- private final PartialPath devicePath;
+ private InsertNode insertNode;
private ProgressIndex progressIndex;
private long extractTime = 0;
- public PipeInsertNodeTabletInsertionEvent(
- final WALEntryHandler walEntryHandler,
- final PartialPath devicePath,
- final ProgressIndex progressIndex,
- final boolean isAligned,
- final boolean isGeneratedByPipe) {
- this(
- walEntryHandler,
- devicePath,
- progressIndex,
- isAligned,
- isGeneratedByPipe,
- null,
- 0,
- null,
- null,
- Long.MIN_VALUE,
- Long.MAX_VALUE);
+ public PipeInsertNodeTabletInsertionEvent(final InsertNode insertNode) {
+ this(insertNode, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE);
}
private PipeInsertNodeTabletInsertionEvent(
- final WALEntryHandler walEntryHandler,
- final PartialPath devicePath,
- final ProgressIndex progressIndex,
- final boolean isAligned,
- final boolean isGeneratedByPipe,
+ final InsertNode insertNode,
final String pipeName,
final long creationTime,
final PipeTaskMeta pipeTaskMeta,
@@ -122,34 +95,24 @@
final long startTime,
final long endTime) {
super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
- this.walEntryHandler = walEntryHandler;
// Record the progress index here so there's no need to get it from the InsertNode later.
- this.devicePath = devicePath;
- this.progressIndex = progressIndex;
- this.isAligned = isAligned;
- this.isGeneratedByPipe = isGeneratedByPipe;
-
+ this.progressIndex = insertNode.getProgressIndex();
+ this.insertNode = insertNode;
this.allocatedMemoryBlock = new AtomicReference<>();
}
- public InsertNode getInsertNode() throws WALPipeException {
- return walEntryHandler.getInsertNode();
+ public InsertNode getInsertNode() {
+ return insertNode;
}
public ByteBuffer getByteBuffer() throws WALPipeException {
- return walEntryHandler.getByteBuffer();
- }
-
- // This method is a pre-determination of whether to use binary transfers.
- // If the insert node is null in cache, it means that we need to read the bytebuffer from the wal,
- // and when the pattern is default, we can transfer the bytebuffer directly without serializing or
- // deserializing
- public InsertNode getInsertNodeViaCacheIfPossible() {
- return walEntryHandler.getInsertNodeViaCacheIfPossible();
+ return insertNode.serializeToByteBuffer();
}
public String getDeviceId() {
- return Objects.nonNull(devicePath) ? devicePath.getFullPath() : null;
+ return Objects.nonNull(insertNode.getDevicePath())
+ ? insertNode.getDevicePath().getFullPath()
+ : null;
}
/////////////////////////// EnrichedEvent ///////////////////////////
@@ -158,19 +121,16 @@
public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
extractTime = System.nanoTime();
try {
- PipeDataNodeResourceManager.wal().pin(walEntryHandler);
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.increaseInsertNodeEventCount(pipeName, creationTime);
- PipeDataNodeAgent.task().addFloatingMemoryUsageInByte(pipeName, ramBytesUsed());
+ PipeDataNodeAgent.task()
+ .addFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed());
}
return true;
} catch (final Exception e) {
LOGGER.warn(
- String.format(
- "Increase reference count for memTable %d error. Holder Message: %s",
- walEntryHandler.getMemTableId(), holderMessage),
- e);
+ String.format("Increase reference count error. Holder Message: %s", holderMessage), e);
return false;
}
}
@@ -178,7 +138,6 @@
@Override
public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
try {
- PipeDataNodeResourceManager.wal().unpin(walEntryHandler);
// release the containers' memory and close memory block
if (dataContainers != null) {
dataContainers.clear();
@@ -188,17 +147,16 @@
return true;
} catch (final Exception e) {
LOGGER.warn(
- String.format(
- "Decrease reference count for memtable %d error. Holder Message: %s",
- walEntryHandler.getMemTableId(), holderMessage),
- e);
+ String.format("Decrease reference count error. Holder Message: %s", holderMessage), e);
return false;
} finally {
if (Objects.nonNull(pipeName)) {
- PipeDataNodeAgent.task().decreaseFloatingMemoryUsageInByte(pipeName, ramBytesUsed());
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeAgent.task()
+ .decreaseFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed());
+ PipeDataNodeSinglePipeMetrics.getInstance()
.decreaseInsertNodeEventCount(pipeName, creationTime, System.nanoTime() - extractTime);
}
+ insertNode = null;
}
}
@@ -221,28 +179,18 @@
final long startTime,
final long endTime) {
return new PipeInsertNodeTabletInsertionEvent(
- walEntryHandler,
- devicePath,
- progressIndex,
- isAligned,
- isGeneratedByPipe,
- pipeName,
- creationTime,
- pipeTaskMeta,
- pattern,
- startTime,
- endTime);
+ insertNode, pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
}
@Override
public boolean isGeneratedByPipe() {
- return isGeneratedByPipe;
+ return insertNode.isGeneratedByPipe();
}
@Override
public boolean mayEventTimeOverlappedWithTimeRange() {
try {
- final InsertNode insertNode = getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = getInsertNode();
if (Objects.isNull(insertNode)) {
return true;
}
@@ -286,7 +234,7 @@
@Override
public boolean mayEventPathsOverlappedWithPattern() {
try {
- final InsertNode insertNode = getInsertNodeViaCacheIfPossible();
+ final InsertNode insertNode = getInsertNode();
if (Objects.isNull(insertNode)) {
return true;
}
@@ -439,8 +387,8 @@
@Override
public String toString() {
return String.format(
- "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}",
- walEntryHandler, progressIndex, isAligned, isGeneratedByPipe, dataContainers)
+ "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}",
+ progressIndex, insertNode.isAligned(), insertNode.isGeneratedByPipe(), dataContainers)
+ " - "
+ super.toString();
}
@@ -448,8 +396,8 @@
@Override
public String coreReportMessage() {
return String.format(
- "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}",
- walEntryHandler, progressIndex, isAligned, isGeneratedByPipe)
+ "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}",
+ progressIndex, insertNode.isAligned(), insertNode.isGeneratedByPipe())
+ " - "
+ super.coreReportMessage();
}
@@ -462,7 +410,7 @@
@Override
public long ramBytesUsed() {
return INSTANCE_SIZE
- + (Objects.nonNull(devicePath) ? PartialPath.estimateSize(devicePath) : 0)
+ + (Objects.nonNull(insertNode) ? InsertNodeMemoryEstimator.sizeOf(insertNode) : 0)
+ (Objects.nonNull(progressIndex) ? progressIndex.ramBytesUsed() : 0);
}
@@ -476,28 +424,24 @@
@Override
public PipeEventResource eventResourceBuilder() {
return new PipeInsertNodeTabletInsertionEventResource(
- this.isReleased, this.referenceCount, this.walEntryHandler, this.allocatedMemoryBlock);
+ this.isReleased, this.referenceCount, this.allocatedMemoryBlock);
}
private static class PipeInsertNodeTabletInsertionEventResource extends PipeEventResource {
- private final WALEntryHandler walEntryHandler;
private final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock;
private PipeInsertNodeTabletInsertionEventResource(
final AtomicBoolean isReleased,
final AtomicInteger referenceCount,
- final WALEntryHandler walEntryHandler,
final AtomicReference<PipeTabletMemoryBlock> allocatedMemoryBlock) {
super(isReleased, referenceCount);
- this.walEntryHandler = walEntryHandler;
this.allocatedMemoryBlock = allocatedMemoryBlock;
}
@Override
protected void finalizeResource() {
try {
- PipeDataNodeResourceManager.wal().unpin(walEntryHandler);
allocatedMemoryBlock.getAndUpdate(
memoryBlock -> {
if (Objects.nonNull(memoryBlock)) {
@@ -506,8 +450,7 @@
return null;
});
} catch (final Exception e) {
- LOGGER.warn(
- "Decrease reference count for memTable {} error.", walEntryHandler.getMemTableId(), e);
+ LOGGER.warn("Decrease reference count error.", e);
}
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
index 95a5327..9f3ad7a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeRawTabletInsertionEvent.java
@@ -28,7 +28,7 @@
import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
import org.apache.iotdb.db.pipe.resource.memory.PipeTabletMemoryBlock;
@@ -130,7 +130,7 @@
allocatedMemoryBlock,
PipeMemoryWeightUtil.calculateTabletSizeInBytes(tablet) + INSTANCE_SIZE);
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.increaseRawTabletEventCount(pipeName, creationTime);
}
return true;
@@ -139,7 +139,7 @@
@Override
public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.decreaseRawTabletEventCount(pipeName, creationTime);
}
allocatedMemoryBlock.close();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
index ca78dd7..91d38cf 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java
@@ -19,23 +19,16 @@
package org.apache.iotdb.db.pipe.event.common.terminate;
-import org.apache.iotdb.common.rpc.thrift.TFlushReq;
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
-import org.apache.iotdb.db.storageengine.StorageEngine;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.atomic.AtomicLong;
/**
* The {@link PipeTerminateEvent} is an {@link EnrichedEvent} that controls the termination of pipe,
@@ -45,40 +38,6 @@
*/
public class PipeTerminateEvent extends EnrichedEvent {
- private static final Logger LOGGER = LoggerFactory.getLogger(PipeTerminateEvent.class);
-
- private static final AtomicLong PROGRESS_REPORT_COUNT = new AtomicLong(0);
- private static final AtomicLong LAST_PROGRESS_REPORT_TIME = new AtomicLong(0);
-
- public static void flushDataRegionIfNeeded() {
- if (PROGRESS_REPORT_COUNT.get() > 0
- && PROGRESS_REPORT_COUNT.get()
- > PipeConfig.getInstance().getPipeFlushAfterTerminateCount()) {
- flushDataRegion();
- return;
- }
-
- if (LAST_PROGRESS_REPORT_TIME.get() > 0
- && System.currentTimeMillis() - LAST_PROGRESS_REPORT_TIME.get()
- > PipeConfig.getInstance().getPipeFlushAfterLastTerminateSeconds() * 1000L) {
- flushDataRegion();
- }
- }
-
- private static void flushDataRegion() {
- try {
- StorageEngine.getInstance().operateFlush(new TFlushReq());
- PROGRESS_REPORT_COUNT.set(0);
- LAST_PROGRESS_REPORT_TIME.set(0);
- LOGGER.info("Force flush all data regions because of last progress report time.");
- } catch (final Exception e) {
- LOGGER.warn(
- "Failed to flush all data regions, please check the error message: {}",
- e.getMessage(),
- e);
- }
- }
-
private final int dataRegionId;
public PipeTerminateEvent(
@@ -135,9 +94,6 @@
@Override
public void reportProgress() {
- PROGRESS_REPORT_COUNT.incrementAndGet();
- LAST_PROGRESS_REPORT_TIME.set(System.currentTimeMillis());
-
// To avoid deadlock
CompletableFuture.runAsync(
() -> PipeDataNodeAgent.task().markCompleted(pipeName, dataRegionId));
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java
new file mode 100644
index 0000000..0f034db
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.event.common.tsfile;
+
+import org.apache.iotdb.commons.consensus.index.ProgressIndex;
+import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
+import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey;
+import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
+import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper;
+import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+
+import java.io.File;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class PipeCompactedTsFileInsertionEvent extends PipeTsFileInsertionEvent {
+
+ private final String dataRegionId;
+ private final Set<String> originFilePaths;
+ private final List<Long> commitIds;
+
+ public PipeCompactedTsFileInsertionEvent(
+ final CommitterKey committerKey,
+ final Set<PipeTsFileInsertionEvent> originalEvents,
+ final PipeTsFileInsertionEvent anyOfOriginalEvents,
+ final TsFileResource tsFileResource,
+ final boolean shouldReportProgress) {
+ super(
+ tsFileResource,
+ bindIsWithMod(originalEvents),
+ bindIsLoaded(originalEvents),
+ bindIsGeneratedByHistoricalExtractor(originalEvents),
+ committerKey.getPipeName(),
+ committerKey.getCreationTime(),
+ anyOfOriginalEvents.getPipeTaskMeta(),
+ anyOfOriginalEvents.getPipePattern(),
+ anyOfOriginalEvents.getStartTime(),
+ anyOfOriginalEvents.getEndTime());
+
+ this.dataRegionId = String.valueOf(committerKey.getRegionId());
+ this.originFilePaths =
+ originalEvents.stream()
+ .map(PipeTsFileInsertionEvent::getTsFile)
+ .map(File::getPath)
+ .collect(Collectors.toSet());
+ this.commitIds =
+ originalEvents.stream()
+ .map(PipeTsFileInsertionEvent::getCommitId)
+ .distinct()
+ .collect(Collectors.toList());
+
+ // init fields of EnrichedEvent
+ this.committerKey = committerKey;
+ // TODO pipe consensus: handle rebootTimes
+ isPatternParsed = bindIsPatternParsed(originalEvents);
+ isTimeParsed = bindIsTimeParsed(originalEvents);
+ this.shouldReportOnCommit = shouldReportProgress;
+
+ // init fields of PipeTsFileInsertionEvent
+ flushPointCount = bindFlushPointCount(originalEvents);
+ overridingProgressIndex = bindOverridingProgressIndex(originalEvents);
+ }
+
+ private static boolean bindIsWithMod(Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isWithMod);
+ }
+
+ private static boolean bindIsLoaded(Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isLoaded);
+ }
+
+ private static boolean bindIsGeneratedByHistoricalExtractor(
+ Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream()
+ .anyMatch(PipeTsFileInsertionEvent::isGeneratedByHistoricalExtractor);
+ }
+
+ private static boolean bindIsTimeParsed(Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream().noneMatch(EnrichedEvent::shouldParseTime);
+ }
+
+ private static boolean bindIsPatternParsed(Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream().noneMatch(EnrichedEvent::shouldParsePattern);
+ }
+
+ private static long bindFlushPointCount(Set<PipeTsFileInsertionEvent> originalEvents) {
+ return originalEvents.stream()
+ .mapToLong(
+ e ->
+ e.getFlushPointCount() == TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET
+ ? 0
+ : e.getFlushPointCount())
+ .sum();
+ }
+
+ private ProgressIndex bindOverridingProgressIndex(Set<PipeTsFileInsertionEvent> originalEvents) {
+ ProgressIndex overridingProgressIndex = MinimumProgressIndex.INSTANCE;
+ for (PipeTsFileInsertionEvent originalEvent : originalEvents) {
+ if (originalEvent.overridingProgressIndex != null) {
+ overridingProgressIndex =
+ overridingProgressIndex.updateToMinimumEqualOrIsAfterProgressIndex(
+ originalEvent.overridingProgressIndex);
+ }
+ }
+ return overridingProgressIndex != null
+ && !overridingProgressIndex.equals(MinimumProgressIndex.INSTANCE)
+ ? overridingProgressIndex
+ : null;
+ }
+
+ @Override
+ public int getRebootTimes() {
+ throw new UnsupportedOperationException(
+ "PipeCompactedTsFileInsertionEvent does not support getRebootTimes.");
+ }
+
+ @Override
+ public boolean hasMultipleCommitIds() {
+ return true;
+ }
+
+ @Override
+ public long getCommitId() {
+ // max of commitIds is used as the commit id for this event
+ return commitIds.stream()
+ .max(Long::compareTo)
+ .orElseThrow(
+ () ->
+ new IllegalStateException(
+ "No commit IDs found in PipeCompactedTsFileInsertionEvent."));
+ }
+
+ // return dummy events for each commit ID (except the max one)
+ @Override
+ public List<EnrichedEvent> getDummyEventsForCommitIds() {
+ return commitIds.stream()
+ .filter(commitId -> commitId != getCommitId())
+ .map(PipeCompactedTsFileInsertionDummyEvent::new)
+ .collect(Collectors.toList());
+ }
+
+ @Override
+ public List<Long> getCommitIds() {
+ return commitIds;
+ }
+
+ @Override
+ public boolean equalsInPipeConsensus(final Object o) {
+ throw new UnsupportedOperationException(
+ "PipeCompactedTsFileInsertionEvent does not support equalsInPipeConsensus.");
+ }
+
+ @Override
+ public void eliminateProgressIndex() {
+ if (Objects.isNull(overridingProgressIndex)) {
+ for (final String originFilePath : originFilePaths) {
+ PipeTsFileEpochProgressIndexKeeper.getInstance()
+ .eliminateProgressIndex(dataRegionId, originFilePath);
+ }
+ }
+ }
+
+ public class PipeCompactedTsFileInsertionDummyEvent extends EnrichedEvent {
+
+ private final long commitId;
+
+ public PipeCompactedTsFileInsertionDummyEvent(final long commitId) {
+ super(
+ PipeCompactedTsFileInsertionEvent.this.pipeName,
+ PipeCompactedTsFileInsertionEvent.this.creationTime,
+ PipeCompactedTsFileInsertionEvent.this.pipeTaskMeta,
+ null, // PipePattern is not needed for dummy event
+ Long.MIN_VALUE,
+ Long.MAX_VALUE);
+ this.commitId = commitId; // Use the commitId passed in
+ this.shouldReportOnCommit = false; // Dummy events do not report progress
+ }
+
+ @Override
+ public long getCommitId() {
+ return commitId;
+ }
+
+ @Override
+ public boolean internallyIncreaseResourceReferenceCount(String holderMessage) {
+ return true;
+ }
+
+ @Override
+ public boolean internallyDecreaseResourceReferenceCount(String holderMessage) {
+ return true;
+ }
+
+ @Override
+ public ProgressIndex getProgressIndex() {
+ return MinimumProgressIndex.INSTANCE;
+ }
+
+ @Override
+ public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport(
+ String pipeName,
+ long creationTime,
+ PipeTaskMeta pipeTaskMeta,
+ PipePattern pattern,
+ long startTime,
+ long endTime) {
+ return null;
+ }
+
+ @Override
+ public boolean isGeneratedByPipe() {
+ return false;
+ }
+
+ @Override
+ public boolean mayEventTimeOverlappedWithTimeRange() {
+ return false;
+ }
+
+ @Override
+ public boolean mayEventPathsOverlappedWithPattern() {
+ return false;
+ }
+
+ @Override
+ public String coreReportMessage() {
+ return "PipeCompactedTsFileInsertionDummyEvent";
+ }
+ }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
index 3aa9b4a..d2c443e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java
@@ -32,7 +32,7 @@
import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer;
import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainerProvider;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
@@ -65,44 +65,31 @@
private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileInsertionEvent.class);
- private final TsFileResource resource;
- private File tsFile;
- private long extractTime = 0;
+ protected final TsFileResource resource;
+ protected File tsFile;
+ protected long extractTime = 0;
// This is true iff the modFile exists and should be transferred
- private boolean isWithMod;
- private File modFile;
+ protected boolean isWithMod;
+ protected File modFile;
- private final boolean isLoaded;
- private final boolean isGeneratedByPipe;
- private final boolean isGeneratedByPipeConsensus;
- private final boolean isGeneratedByHistoricalExtractor;
+ protected final boolean isLoaded;
+ protected final boolean isGeneratedByPipe;
+ protected final boolean isGeneratedByPipeConsensus;
+ protected final boolean isGeneratedByHistoricalExtractor;
- private final AtomicBoolean isClosed;
- private final AtomicReference<TsFileInsertionDataContainer> dataContainer;
+ protected final AtomicBoolean isClosed;
+ protected final AtomicReference<TsFileInsertionDataContainer> dataContainer;
// The point count of the TsFile. Used for metrics on PipeConsensus' receiver side.
// May be updated after it is flushed. Should be negative if not set.
- private long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET;
+ protected long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET;
- private volatile ProgressIndex overridingProgressIndex;
+ protected volatile ProgressIndex overridingProgressIndex;
- public PipeTsFileInsertionEvent(
- final TsFileResource resource,
- final boolean isLoaded,
- final boolean isGeneratedByHistoricalExtractor) {
+ public PipeTsFileInsertionEvent(final TsFileResource resource, final boolean isLoaded) {
// The modFile must be copied before the event is assigned to the listening pipes
- this(
- resource,
- true,
- isLoaded,
- isGeneratedByHistoricalExtractor,
- null,
- 0,
- null,
- null,
- Long.MIN_VALUE,
- Long.MAX_VALUE);
+ this(resource, true, isLoaded, false, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE);
}
public PipeTsFileInsertionEvent(
@@ -117,9 +104,13 @@
final long startTime,
final long endTime) {
super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime);
-
this.resource = resource;
- tsFile = resource.getTsFile();
+
+ // For events created at assigner or historical extractor, the tsFile is get from the resource
+ // For events created for source, the tsFile is inherited from the assigner, because the
+ // original tsFile may be gone, and we need to get the assigner's hard-linked tsFile to
+ // hard-link it to each pipe dir
+ this.tsFile = resource.getTsFile();
final ModificationFile modFile = resource.getModFile();
this.isWithMod = isWithMod && modFile.exists();
@@ -130,6 +121,8 @@
this.isGeneratedByPipeConsensus = resource.isGeneratedByPipeConsensus();
this.isGeneratedByHistoricalExtractor = isGeneratedByHistoricalExtractor;
+ this.dataContainer = new AtomicReference<>(null);
+
isClosed = new AtomicBoolean(resource.isClosed());
// Register close listener if TsFile is not closed
if (!isClosed.get()) {
@@ -165,8 +158,6 @@
// If the status is "closed", then the resource status is "closed", the tsFile won't be altered
// and can be sent.
isClosed.set(resource.isClosed());
-
- this.dataContainer = new AtomicReference<>(null);
}
/**
@@ -174,6 +165,10 @@
* otherwise.
*/
public boolean waitForTsFileClose() throws InterruptedException {
+ if (Objects.isNull(resource)) {
+ return true;
+ }
+
if (!isClosed.get()) {
isClosed.set(resource.isClosed());
@@ -204,6 +199,7 @@
return !resource.isEmpty();
}
+ @Override
public File getTsFile() {
return tsFile;
}
@@ -226,10 +222,6 @@
return isLoaded;
}
- public long getFileStartTime() {
- return resource.getFileStartTime();
- }
-
/**
* Only used for metrics on PipeConsensus' receiver side. If the event is recovered after data
* node's restart, the flushPointCount can be not set. It's totally fine for the PipeConsensus'
@@ -252,9 +244,10 @@
public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) {
extractTime = System.nanoTime();
try {
- tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, resource);
+ tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, pipeName);
if (isWithMod) {
- modFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, null);
+ modFile =
+ PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, pipeName);
}
return true;
} catch (final Exception e) {
@@ -266,7 +259,7 @@
return false;
} finally {
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.increaseTsFileEventCount(pipeName, creationTime);
}
}
@@ -275,9 +268,9 @@
@Override
public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) {
try {
- PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile);
+ PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName);
if (isWithMod) {
- PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile);
+ PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName);
}
close();
return true;
@@ -290,7 +283,7 @@
return false;
} finally {
if (Objects.nonNull(pipeName)) {
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.decreaseTsFileEventCount(pipeName, creationTime, System.nanoTime() - extractTime);
}
}
@@ -303,24 +296,7 @@
@Override
public ProgressIndex getProgressIndex() {
- try {
- if (!waitForTsFileClose()) {
- LOGGER.warn(
- "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex",
- tsFile);
- return MinimumProgressIndex.INSTANCE;
- }
- if (Objects.nonNull(overridingProgressIndex)) {
- return overridingProgressIndex;
- }
- return resource.getMaxProgressIndexAfterClose();
- } catch (final InterruptedException e) {
- LOGGER.warn(
- String.format(
- "Interrupted when waiting for closing TsFile %s.", resource.getTsFilePath()));
- Thread.currentThread().interrupt();
- return MinimumProgressIndex.INSTANCE;
- }
+ return resource.getMaxProgressIndex();
}
/**
@@ -346,7 +322,7 @@
}
public void eliminateProgressIndex() {
- if (Objects.isNull(overridingProgressIndex)) {
+ if (Objects.isNull(overridingProgressIndex) && Objects.nonNull(resource)) {
PipeTsFileEpochProgressIndexKeeper.getInstance()
.eliminateProgressIndex(resource.getDataRegionId(), resource.getTsFilePath());
}
@@ -380,16 +356,15 @@
@Override
public boolean mayEventTimeOverlappedWithTimeRange() {
- // If the tsFile is not closed the resource.getFileEndTime() will be Long.MIN_VALUE
- // In that case we only judge the resource.getFileStartTime() to avoid losing data
- return isClosed.get()
- ? startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime
- : resource.getFileStartTime() <= endTime;
+ // Notice that this is only called at realtime extraction, and the tsFile is always closed
+ // Thus we can use the end time to judge the overlap
+ return Objects.isNull(resource)
+ || startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime;
}
@Override
public boolean mayEventPathsOverlappedWithPattern() {
- if (!resource.isClosed()) {
+ if (Objects.isNull(resource) || !resource.isClosed()) {
return true;
}
@@ -397,7 +372,8 @@
final Map<IDeviceID, Boolean> deviceIsAlignedMap =
PipeDataNodeResourceManager.tsfile()
.getDeviceIsAlignedMapFromCache(
- PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()),
+ PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(
+ resource.getTsFile(), pipeName),
false);
final Set<IDeviceID> deviceSet =
Objects.nonNull(deviceIsAlignedMap) ? deviceIsAlignedMap.keySet() : resource.getDevices();
@@ -569,7 +545,7 @@
} catch (final IOException e) {
close();
- final String errorMsg = String.format("Read TsFile %s error.", resource.getTsFilePath());
+ final String errorMsg = String.format("Read TsFile %s error.", tsFile.getPath());
LOGGER.warn(errorMsg, e);
throw new PipeException(errorMsg);
}
@@ -617,8 +593,8 @@
@Override
public String toString() {
return String.format(
- "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s, dataContainer=%s}",
- resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get(), dataContainer)
+ "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, dataContainer=%s}",
+ resource, tsFile, isLoaded, isGeneratedByPipe, dataContainer)
+ " - "
+ super.toString();
}
@@ -626,8 +602,8 @@
@Override
public String coreReportMessage() {
return String.format(
- "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s}",
- resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get())
+ "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s}",
+ resource, tsFile, isLoaded, isGeneratedByPipe)
+ " - "
+ super.coreReportMessage();
}
@@ -644,6 +620,7 @@
return new PipeTsFileInsertionEventResource(
this.isReleased,
this.referenceCount,
+ this.pipeName,
this.tsFile,
this.isWithMod,
this.modFile,
@@ -656,15 +633,18 @@
private final boolean isWithMod;
private final File modFile;
private final AtomicReference<TsFileInsertionDataContainer> dataContainer;
+ private final String pipeName;
private PipeTsFileInsertionEventResource(
final AtomicBoolean isReleased,
final AtomicInteger referenceCount,
+ final String pipeName,
final File tsFile,
final boolean isWithMod,
final File modFile,
final AtomicReference<TsFileInsertionDataContainer> dataContainer) {
super(isReleased, referenceCount);
+ this.pipeName = pipeName;
this.tsFile = tsFile;
this.isWithMod = isWithMod;
this.modFile = modFile;
@@ -675,9 +655,9 @@
protected void finalizeResource() {
try {
// decrease reference count
- PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile);
+ PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName);
if (isWithMod) {
- PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile);
+ PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName);
}
// close data container
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java
index 21ce698..18ef721 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java
@@ -28,7 +28,8 @@
import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer;
import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
-import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResource;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
+import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileMemResource;
import org.apache.tsfile.file.metadata.IDeviceID;
import org.apache.tsfile.file.metadata.PlainDeviceID;
@@ -79,8 +80,8 @@
// Use scan container to save memory
if ((double) PipeDataNodeResourceManager.memory().getUsedMemorySizeInBytes()
- / PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes()
- > PipeTsFileResource.MEMORY_SUFFICIENT_THRESHOLD) {
+ / PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes()
+ > PipeTsFileMemResource.MEMORY_SUFFICIENT_THRESHOLD) {
return new TsFileInsertionScanDataContainer(
pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java
index d4a3919..65525ff 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEvent.java
@@ -84,10 +84,6 @@
return device2Measurements;
}
- public void gcSchemaInfo() {
- device2Measurements = null;
- }
-
public boolean mayExtractorUseTablets(final PipeRealtimeDataRegionExtractor extractor) {
final TsFileEpoch.State state = tsFileEpoch.getState(extractor);
return state.equals(TsFileEpoch.State.EMPTY) || state.equals(TsFileEpoch.State.USING_TABLET);
@@ -182,9 +178,7 @@
event.shallowCopySelfAndBindPipeTaskMetaForProgressReport(
pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime),
this.tsFileEpoch,
- // device2Measurements is not used anymore, so it is not copied.
- // If null is not passed, the field will not be GCed and may cause OOM.
- null,
+ this.device2Measurements,
pipeTaskMeta,
pattern,
startTime,
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java
index ef9db9a..0c3bce5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java
@@ -28,7 +28,6 @@
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
public class PipeRealtimeEventFactory {
@@ -37,22 +36,13 @@
public static PipeRealtimeEvent createRealtimeEvent(
final TsFileResource resource, final boolean isLoaded) {
return TS_FILE_EPOCH_MANAGER.bindPipeTsFileInsertionEvent(
- new PipeTsFileInsertionEvent(resource, isLoaded, false), resource);
+ new PipeTsFileInsertionEvent(resource, isLoaded), resource);
}
public static PipeRealtimeEvent createRealtimeEvent(
- final WALEntryHandler walEntryHandler,
- final InsertNode insertNode,
- final TsFileResource resource) {
+ final InsertNode insertNode, final TsFileResource resource) {
return TS_FILE_EPOCH_MANAGER.bindPipeInsertNodeTabletInsertionEvent(
- new PipeInsertNodeTabletInsertionEvent(
- walEntryHandler,
- insertNode.getDevicePath(),
- insertNode.getProgressIndex(),
- insertNode.isAligned(),
- insertNode.isGeneratedByPipe()),
- insertNode,
- resource);
+ new PipeInsertNodeTabletInsertionEvent(insertNode), insertNode, resource);
}
public static PipeRealtimeEvent createRealtimeEvent(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java
index 808df12..3d5b616 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java
@@ -20,7 +20,7 @@
package org.apache.iotdb.db.pipe.extractor.dataregion;
import org.apache.iotdb.commons.consensus.DataRegionId;
-import org.apache.iotdb.commons.exception.IllegalPathException;
+import org.apache.iotdb.commons.pipe.config.constant.SystemConstant;
import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
import org.apache.iotdb.commons.pipe.extractor.IoTDBExtractor;
@@ -34,11 +34,10 @@
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionHybridExtractor;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionLogExtractor;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionTsFileExtractor;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics;
import org.apache.iotdb.db.storageengine.StorageEngine;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode;
import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
@@ -47,11 +46,12 @@
import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
import org.apache.iotdb.pipe.api.exception.PipeException;
-import org.apache.tsfile.file.metadata.enums.CompressionType;
import org.apache.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nullable;
+
import java.util.Arrays;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
@@ -95,13 +95,12 @@
private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionExtractor.class);
- private PipeHistoricalDataRegionExtractor historicalExtractor;
+ private @Nullable PipeHistoricalDataRegionExtractor historicalExtractor;
private PipeRealtimeDataRegionExtractor realtimeExtractor;
private DataRegionWatermarkInjector watermarkInjector;
private boolean hasNoExtractionNeed = true;
- private boolean shouldExtractInsertion = false;
private boolean shouldExtractDeletion = false;
@Override
@@ -116,7 +115,6 @@
return;
}
hasNoExtractionNeed = false;
- shouldExtractInsertion = insertionDeletionListeningOptionPair.getLeft();
shouldExtractDeletion = insertionDeletionListeningOptionPair.getRight();
if (insertionDeletionListeningOptionPair.getLeft().equals(true)
@@ -213,10 +211,22 @@
EXTRACTOR_HISTORY_END_TIME_KEY);
}
- constructHistoricalExtractor();
+ if (validator
+ .getParameters()
+ .getBooleanOrDefault(SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE)
+ || validator
+ .getParameters()
+ .getBooleanOrDefault(
+ Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY),
+ EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE)) {
+ // Only construct the historical extractor on restart or when history extraction is enabled; otherwise skip it so no tsFile is flushed or opened when historical data is disabled
+ constructHistoricalExtractor();
+ }
constructRealtimeExtractor(validator.getParameters());
- historicalExtractor.validate(validator);
+ if (Objects.nonNull(historicalExtractor)) {
+ historicalExtractor.validate(validator);
+ }
realtimeExtractor.validate(validator);
}
@@ -237,12 +247,10 @@
}
private void constructHistoricalExtractor() {
- // Enable historical extractor by default
historicalExtractor = new PipeHistoricalDataRegionTsFileExtractor();
}
- private void constructRealtimeExtractor(final PipeParameters parameters)
- throws IllegalPathException {
+ private void constructRealtimeExtractor(final PipeParameters parameters) {
// Use heartbeat only extractor if disable realtime extractor
if (!parameters.getBooleanOrDefault(
Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY),
@@ -270,7 +278,6 @@
// Use hybrid mode by default
if (!parameters.hasAnyAttributes(EXTRACTOR_REALTIME_MODE_KEY, SOURCE_REALTIME_MODE_KEY)) {
- checkWalEnableAndSetUncompressed(parameters);
realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor();
LOGGER.info(
"Pipe: '{}' is not set, use hybrid mode by default.", EXTRACTOR_REALTIME_MODE_KEY);
@@ -285,15 +292,12 @@
case EXTRACTOR_REALTIME_MODE_HYBRID_VALUE:
case EXTRACTOR_REALTIME_MODE_LOG_VALUE:
case EXTRACTOR_REALTIME_MODE_STREAM_MODE_VALUE:
- checkWalEnableAndSetUncompressed(parameters);
realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor();
break;
case EXTRACTOR_REALTIME_MODE_FORCED_LOG_VALUE:
- checkWalEnableAndSetUncompressed(parameters);
realtimeExtractor = new PipeRealtimeDataRegionLogExtractor();
break;
default:
- checkWalEnableAndSetUncompressed(parameters);
realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor();
if (LOGGER.isWarnEnabled()) {
LOGGER.warn(
@@ -303,28 +307,6 @@
}
}
- private void checkWalEnableAndSetUncompressed(final PipeParameters parameters)
- throws IllegalPathException {
- if (Boolean.TRUE.equals(
- DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters)
- .getLeft())
- && IoTDBDescriptor.getInstance().getConfig().getWalMode().equals(WALMode.DISABLE)) {
- throw new PipeException(
- "The pipe cannot transfer realtime insertion if data region disables wal. Please set 'realtime.mode'='batch' in source parameters when enabling realtime transmission.");
- }
-
- if (!IoTDBDescriptor.getInstance()
- .getConfig()
- .getWALCompressionAlgorithm()
- .equals(CompressionType.UNCOMPRESSED)) {
- LOGGER.info(
- "The pipe prefers uncompressed wal, and may introduce certain delay in realtime insert syncing without it. Hence, we change it to uncompressed.");
- IoTDBDescriptor.getInstance()
- .getConfig()
- .setWALCompressionAlgorithm(CompressionType.UNCOMPRESSED);
- }
- }
-
@Override
public void customize(
final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration)
@@ -335,7 +317,9 @@
super.customize(parameters, configuration);
- historicalExtractor.customize(parameters, configuration);
+ if (Objects.nonNull(historicalExtractor)) {
+ historicalExtractor.customize(parameters, configuration);
+ }
realtimeExtractor.customize(parameters, configuration);
// Set watermark injector
@@ -358,7 +342,7 @@
// register metric after generating taskID
PipeDataRegionExtractorMetrics.getInstance().register(this);
PipeTsFileToTabletsMetrics.getInstance().register(this);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().register(this);
+ PipeDataNodeSinglePipeMetrics.getInstance().register(this);
}
@Override
@@ -372,7 +356,9 @@
"Pipe {}@{}: Starting historical extractor {} and realtime extractor {}.",
pipeName,
regionId,
- historicalExtractor.getClass().getSimpleName(),
+ Objects.nonNull(historicalExtractor)
+ ? historicalExtractor.getClass().getSimpleName()
+ : null,
realtimeExtractor.getClass().getSimpleName());
super.start();
@@ -425,14 +411,18 @@
// There can still be writing when tsFile events are added. If we start
// realtimeExtractor after the process, then this part of data will be lost.
realtimeExtractor.start();
- historicalExtractor.start();
+ if (Objects.nonNull(historicalExtractor)) {
+ historicalExtractor.start();
+ }
} catch (final Exception e) {
exceptionHolder.set(e);
LOGGER.warn(
"Pipe {}@{}: Start historical extractor {} and realtime extractor {} error.",
pipeName,
regionId,
- historicalExtractor.getClass().getSimpleName(),
+ Objects.nonNull(historicalExtractor)
+ ? historicalExtractor.getClass().getSimpleName()
+ : null,
realtimeExtractor.getClass().getSimpleName(),
e);
}
@@ -451,7 +441,7 @@
}
Event event = null;
- if (!historicalExtractor.hasConsumedAll()) {
+ if (Objects.nonNull(historicalExtractor) && !historicalExtractor.hasConsumedAll()) {
event = historicalExtractor.supply();
} else {
if (Objects.nonNull(watermarkInjector)) {
@@ -481,32 +471,21 @@
return;
}
- historicalExtractor.close();
+ if (Objects.nonNull(historicalExtractor)) {
+ historicalExtractor.close();
+ }
realtimeExtractor.close();
if (Objects.nonNull(taskID)) {
PipeDataRegionExtractorMetrics.getInstance().deregister(taskID);
}
}
- //////////////////////////// APIs provided for detecting stuck ////////////////////////////
-
- public boolean shouldExtractInsertion() {
- return shouldExtractInsertion;
- }
-
- public boolean isStreamMode() {
- return realtimeExtractor instanceof PipeRealtimeDataRegionHybridExtractor
- || realtimeExtractor instanceof PipeRealtimeDataRegionLogExtractor;
- }
-
- public boolean hasConsumedAllHistoricalTsFiles() {
- return historicalExtractor.hasConsumedAll();
- }
-
//////////////////////////// APIs provided for metric framework ////////////////////////////
public int getHistoricalTsFileInsertionEventCount() {
- return hasBeenStarted.get() ? historicalExtractor.getPendingQueueSize() : 0;
+ return hasBeenStarted.get() && Objects.nonNull(historicalExtractor)
+ ? historicalExtractor.getPendingQueueSize()
+ : 0;
}
public int getTabletInsertionEventCount() {
@@ -520,10 +499,4 @@
public int getPipeHeartbeatEventCount() {
return hasBeenStarted.get() ? realtimeExtractor.getPipeHeartbeatEventCount() : 0;
}
-
- public int getEventCount() {
- return hasBeenStarted.get()
- ? (historicalExtractor.getPendingQueueSize() + realtimeExtractor.getEventCount())
- : 0;
- }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java
index acec757..8c8a3a3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java
@@ -36,6 +36,7 @@
import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
import org.apache.iotdb.db.storageengine.StorageEngine;
import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
+import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator;
@@ -57,10 +58,10 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.stream.Collectors;
@@ -91,9 +92,6 @@
private static final Logger LOGGER =
LoggerFactory.getLogger(PipeHistoricalDataRegionTsFileExtractor.class);
- private static final Map<Integer, Long> DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP = new HashMap<>();
- private static final long PIPE_MIN_FLUSH_INTERVAL_IN_MS = 2000;
-
private String pipeName;
private long creationTime;
@@ -214,17 +212,14 @@
try {
historicalDataExtractionStartTime =
- isHistoricalExtractorEnabled
- && parameters.hasAnyAttributes(
- EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY)
+ parameters.hasAnyAttributes(
+ EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY)
? DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone(
parameters.getStringByKeys(
EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY))
: Long.MIN_VALUE;
historicalDataExtractionEndTime =
- isHistoricalExtractorEnabled
- && parameters.hasAnyAttributes(
- EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)
+ parameters.hasAnyAttributes(EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)
? DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone(
parameters.getStringByKeys(
EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY))
@@ -268,10 +263,6 @@
startIndex = environment.getPipeTaskMeta().restoreProgressIndex();
dataRegionId = environment.getRegionId();
- synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) {
- DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.putIfAbsent(dataRegionId, 0L);
- }
-
pipePattern = PipePattern.parsePipePatternFromSourceParameters(parameters);
final DataRegion dataRegion =
@@ -295,34 +286,6 @@
// PipeHistoricalDataRegionExtractor from implementation perspective.
: environment.getCreationTime();
- // Only invoke flushDataRegionAllTsFiles() when the pipe runs in the realtime only mode.
- // realtime only mode -> (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE)
- //
- // Ensure that all data in the data region is flushed to disk before extracting data.
- // This ensures the generation time of all newly generated TsFiles (realtime data) after the
- // invocation of flushDataRegionAllTsFiles() is later than the creationTime of the pipe
- // (historicalDataExtractionTimeLowerBound).
- //
- // Note that: the generation time of the TsFile is the time when the TsFile is created, not
- // the time when the data is flushed to the TsFile.
- //
- // Then we can use the generation time of the TsFile to determine whether the data in the
- // TsFile should be extracted by comparing the generation time of the TsFile with the
- // historicalDataExtractionTimeLowerBound when starting the pipe in realtime only mode.
- //
- // If we don't invoke flushDataRegionAllTsFiles() in the realtime only mode, the data generated
- // between the creation time of the pipe the time when the pipe starts will be lost.
- if (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE) {
- synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) {
- final long lastFlushedByPipeTime =
- DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId);
- if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) {
- flushDataRegionAllTsFiles();
- DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace(dataRegionId, System.currentTimeMillis());
- }
- }
- }
-
shouldTransferModFile =
parameters.getBooleanOrDefault(
Arrays.asList(SOURCE_MODS_ENABLE_KEY, EXTRACTOR_MODS_ENABLE_KEY),
@@ -345,24 +308,9 @@
}
}
- private void flushDataRegionAllTsFiles() {
- final DataRegion dataRegion =
- StorageEngine.getInstance().getDataRegion(new DataRegionId(dataRegionId));
- if (Objects.isNull(dataRegion)) {
- return;
- }
-
- dataRegion.writeLock("Pipe: create historical TsFile extractor");
- try {
- dataRegion.syncCloseAllWorkingTsFileProcessors();
- } finally {
- dataRegion.writeUnlock();
- }
- }
-
@Override
public synchronized void start() {
- if (!shouldExtractInsertion) {
+ if (!shouldExtractInsertion || !isHistoricalExtractorEnabled) {
hasBeenStarted = true;
return;
}
@@ -394,48 +342,29 @@
// consensus pipe, and the lastFlushed timestamp is not updated here.
if (pipeName.startsWith(PipeStaticMeta.CONSENSUS_PIPE_PREFIX)) {
dataRegion.syncCloseAllWorkingTsFileProcessors();
- LOGGER.info(
- "Pipe {}@{}: finish to flush data region, took {} ms",
- pipeName,
- dataRegionId,
- System.currentTimeMillis() - startHistoricalExtractionTime);
} else {
- synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) {
- final long lastFlushedByPipeTime =
- DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId);
- if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) {
- dataRegion.syncCloseAllWorkingTsFileProcessors();
- DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace(
- dataRegionId, System.currentTimeMillis());
- LOGGER.info(
- "Pipe {}@{}: finish to flush data region, took {} ms",
- pipeName,
- dataRegionId,
- System.currentTimeMillis() - startHistoricalExtractionTime);
- } else {
- LOGGER.info(
- "Pipe {}@{}: skip to flush data region, last flushed time {} ms ago",
- pipeName,
- dataRegionId,
- System.currentTimeMillis() - lastFlushedByPipeTime);
- }
- }
+ dataRegion.asyncCloseAllWorkingTsFileProcessors();
}
+ LOGGER.info(
+ "Pipe {}@{}: finish to flush data region, took {} ms",
+ pipeName,
+ dataRegionId,
+ System.currentTimeMillis() - startHistoricalExtractionTime);
final TsFileManager tsFileManager = dataRegion.getTsFileManager();
tsFileManager.readLock();
try {
final int originalSequenceTsFileCount = tsFileManager.size(true);
- final int originalUnsequenceTsFileCount = tsFileManager.size(false);
+ final int originalUnSequenceTsFileCount = tsFileManager.size(false);
final List<TsFileResource> resourceList =
- new ArrayList<>(originalSequenceTsFileCount + originalUnsequenceTsFileCount);
+ new ArrayList<>(originalSequenceTsFileCount + originalUnSequenceTsFileCount);
LOGGER.info(
"Pipe {}@{}: start to extract historical TsFile, original sequence file count {}, "
- + "original unsequence file count {}, start progress index {}",
+ + "original unSequence file count {}, start progress index {}",
pipeName,
dataRegionId,
originalSequenceTsFileCount,
- originalUnsequenceTsFileCount,
+ originalUnSequenceTsFileCount,
startIndex);
final Collection<TsFileResource> sequenceTsFileResources =
@@ -445,9 +374,13 @@
// Some resource is marked as deleted but not removed from the list.
!resource.isDeleted()
&& (
- // Some resource may not be closed due to the control of
- // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them.
+ // If the tsFile is not already marked closing, its close event has not
+ // been captured by the pipe realtime module yet, so we can leave it to
+ // the realtime sync module to avoid blocking the pipe sync process.
!resource.isClosed()
+ && Optional.ofNullable(resource.getProcessor())
+ .map(TsFileProcessor::alreadyMarkedClosing)
+ .orElse(true)
|| mayTsFileContainUnprocessedData(resource)
&& isTsFileResourceOverlappedWithTimeRange(resource)
&& isTsFileGeneratedAfterExtractionTimeLowerBound(resource)
@@ -455,22 +388,26 @@
.collect(Collectors.toList());
resourceList.addAll(sequenceTsFileResources);
- final Collection<TsFileResource> unsequenceTsFileResources =
+ final Collection<TsFileResource> unSequenceTsFileResources =
tsFileManager.getTsFileList(false).stream()
.filter(
resource ->
// Some resource is marked as deleted but not removed from the list.
!resource.isDeleted()
&& (
- // Some resource may not be closed due to the control of
- // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them.
+ // If the tsFile is not already marked closing, its close event has not
+ // been captured by the pipe realtime module yet, so we can leave it to
+ // the realtime sync module to avoid blocking the pipe sync process.
!resource.isClosed()
+ && Optional.ofNullable(resource.getProcessor())
+ .map(TsFileProcessor::alreadyMarkedClosing)
+ .orElse(true)
|| mayTsFileContainUnprocessedData(resource)
&& isTsFileResourceOverlappedWithTimeRange(resource)
&& isTsFileGeneratedAfterExtractionTimeLowerBound(resource)
&& mayTsFileResourceOverlappedWithPattern(resource)))
.collect(Collectors.toList());
- resourceList.addAll(unsequenceTsFileResources);
+ resourceList.addAll(unSequenceTsFileResources);
resourceList.removeIf(
resource -> {
@@ -478,7 +415,7 @@
// Will unpin it after the PipeTsFileInsertionEvent is created and pinned.
try {
PipeDataNodeResourceManager.tsfile()
- .pinTsFileResource(resource, shouldTransferModFile);
+ .pinTsFileResource(resource, shouldTransferModFile, pipeName);
return false;
} catch (final IOException e) {
LOGGER.warn("Pipe: failed to pin TsFileResource {}", resource.getTsFilePath(), e);
@@ -500,10 +437,10 @@
dataRegionId,
sequenceTsFileResources.size(),
originalSequenceTsFileCount,
- unsequenceTsFileResources.size(),
- originalUnsequenceTsFileCount,
+ unSequenceTsFileResources.size(),
+ originalUnSequenceTsFileCount,
resourceList.size(),
- originalSequenceTsFileCount + originalUnsequenceTsFileCount,
+ originalSequenceTsFileCount + originalUnSequenceTsFileCount,
System.currentTimeMillis() - startHistoricalExtractionTime);
} finally {
tsFileManager.readUnlock();
@@ -522,8 +459,8 @@
if (startIndex instanceof StateProgressIndex) {
startIndex = ((StateProgressIndex) startIndex).getInnerProgressIndex();
}
- return !startIndex.isAfter(resource.getMaxProgressIndexAfterClose())
- && !startIndex.equals(resource.getMaxProgressIndexAfterClose());
+ return !startIndex.isAfter(resource.getMaxProgressIndex())
+ && !startIndex.equals(resource.getMaxProgressIndex());
}
private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource resource) {
@@ -532,7 +469,8 @@
final Map<IDeviceID, Boolean> deviceIsAlignedMap =
PipeDataNodeResourceManager.tsfile()
.getDeviceIsAlignedMapFromCache(
- PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()),
+ PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(
+ resource.getTsFile(), pipeName),
false);
deviceSet =
Objects.nonNull(deviceIsAlignedMap) ? deviceIsAlignedMap.keySet() : resource.getDevices();
@@ -591,6 +529,7 @@
}
final TsFileResource resource = pendingQueue.poll();
+
if (resource == null) {
final PipeTerminateEvent terminateEvent =
new PipeTerminateEvent(pipeName, creationTime, pipeTaskMeta, dataRegionId);
@@ -639,7 +578,7 @@
return isReferenceCountIncreased ? event : null;
} finally {
try {
- PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource);
+ PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName);
} catch (final IOException e) {
LOGGER.warn(
"Pipe {}@{}: failed to unpin TsFileResource after creating event, original path: {}",
@@ -669,7 +608,7 @@
pendingQueue.forEach(
resource -> {
try {
- PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource);
+ PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName);
} catch (final IOException e) {
LOGGER.warn(
"Pipe {}@{}: failed to unpin TsFileResource after dropping pipe, original path: {}",
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
index c7a7783..ad8b6c7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
@@ -20,6 +20,8 @@
package org.apache.iotdb.db.pipe.extractor.dataregion.realtime;
import org.apache.iotdb.commons.consensus.DataRegionId;
+import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
+import org.apache.iotdb.commons.consensus.index.impl.SegmentProgressIndex;
import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException;
import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
@@ -35,6 +37,7 @@
import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeTimePartitionListener;
+import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher.CachedSchemaPatternMatcher;
import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter;
import org.apache.iotdb.db.storageengine.StorageEngine;
import org.apache.iotdb.db.storageengine.dataregion.DataRegion;
@@ -200,6 +203,10 @@
dataRegionId = String.valueOf(environment.getRegionId());
pipeTaskMeta = environment.getPipeTaskMeta();
+ if (pipeTaskMeta.getProgressIndex() instanceof MinimumProgressIndex) {
+ pipeTaskMeta.updateProgressIndex(new SegmentProgressIndex());
+ }
+
// Metrics related to TsFileEpoch are managed in PipeExtractorMetrics. These metrics are
// indexed by the taskID of IoTDBDataRegionExtractor. To avoid PipeRealtimeDataRegionExtractor
// holding a reference to IoTDBDataRegionExtractor, the taskID should be constructed to
@@ -312,29 +319,7 @@
}
}
- // 1. Check if time parsing is necessary. If not, it means that the timestamps of the data
- // contained in this event are definitely within the time range [start time, end time].
- // 2. Check if the event's data timestamps may intersect with the time range. If not, it means
- // that the data timestamps of this event are definitely not within the time range.
- // 3. Check if pattern parsing is necessary. If not, it means that the paths of the data
- // contained in this event are definitely covered by the pattern.
- // 4. Check if the event's data paths may intersect with the pattern. If not, it means that the
- // data of this event is definitely not overlapped with the pattern.
- if ((!event.shouldParseTime() || event.getEvent().mayEventTimeOverlappedWithTimeRange())
- && (!event.shouldParsePattern() || event.getEvent().mayEventPathsOverlappedWithPattern())) {
- if (sloppyTimeRange) {
- // only skip parsing time for events whose data timestamps may intersect with the time range
- event.skipParsingTime();
- }
- if (sloppyPattern) {
- // only skip parsing pattern for events whose data paths may intersect with the pattern
- event.skipParsingPattern();
- }
-
- doExtract(event);
- } else {
- event.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false);
- }
+ doExtract(event);
synchronized (isClosed) {
if (isClosed.get()) {
@@ -395,6 +380,67 @@
}
}
+ @Override
+ public Event supply() {
+ PipeRealtimeEvent realtimeEvent = getNextRealtimeEvent();
+
+ while (realtimeEvent != null) {
+ final Event suppliedEvent = doSupply(realtimeEvent);
+
+ realtimeEvent.decreaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName(), false);
+
+ if (suppliedEvent != null) {
+ return suppliedEvent;
+ }
+
+ realtimeEvent = getNextRealtimeEvent();
+ }
+
+ // means the pending queue is empty.
+ return null;
+ }
+
+ private PipeRealtimeEvent getNextRealtimeEvent() {
+ PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+
+ while (realtimeEvent != null
+ && (!CachedSchemaPatternMatcher.match(realtimeEvent, this)
+ || !coarseFilterEvent(realtimeEvent))) {
+ realtimeEvent.decreaseReferenceCount(
+ PipeRealtimeDataRegionTsFileExtractor.class.getName(), false);
+ realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+ }
+
+ return realtimeEvent;
+ }
+
+ // This may take some time, so we leave it to the processor thread instead of the writing thread
+ private boolean coarseFilterEvent(final PipeRealtimeEvent event) {
+ // 1. Check if time parsing is necessary. If not, it means that the timestamps of the data
+ // contained in this event are definitely within the time range [start time, end time].
+ // 2. Check if the event's data timestamps may intersect with the time range. If not, it means
+ // that the data timestamps of this event are definitely not within the time range.
+ // 3. Check if pattern parsing is necessary. If not, it means that the paths of the data
+ // contained in this event are definitely covered by the pattern.
+ // 4. Check if the event's data paths may intersect with the pattern. If not, it means that the
+ // data of this event is definitely not overlapped with the pattern.
+ if ((!event.shouldParseTime() || event.getEvent().mayEventTimeOverlappedWithTimeRange())
+ && (!event.shouldParsePattern() || event.getEvent().mayEventPathsOverlappedWithPattern())) {
+ if (sloppyTimeRange) {
+ // only skip parsing time for events whose data timestamps may intersect with the time range
+ event.skipParsingTime();
+ }
+ if (sloppyPattern) {
+ // only skip parsing pattern for events whose data paths may intersect with the pattern
+ event.skipParsingPattern();
+ }
+ return true;
+ }
+ return false;
+ }
+
+ protected abstract Event doSupply(final PipeRealtimeEvent realtimeEvent);
+
protected Event supplyHeartbeat(final PipeRealtimeEvent event) {
if (event.increaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName())) {
return event.getEvent();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java
index 1df62ee..7ebaa67 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHeartbeatExtractor.java
@@ -27,29 +27,13 @@
public class PipeRealtimeDataRegionHeartbeatExtractor extends PipeRealtimeDataRegionExtractor {
@Override
- public Event supply() {
- PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
-
- while (realtimeEvent != null) {
- Event suppliedEvent = null;
-
- // only supply PipeHeartbeatEvent
- if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
- suppliedEvent = supplyHeartbeat(realtimeEvent);
- } else if (realtimeEvent.getEvent() instanceof ProgressReportEvent) {
- suppliedEvent = supplyDirectly(realtimeEvent);
- }
-
- realtimeEvent.decreaseReferenceCount(
- PipeRealtimeDataRegionHeartbeatExtractor.class.getName(), false);
-
- if (suppliedEvent != null) {
- return suppliedEvent;
- }
-
- realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+ protected Event doSupply(PipeRealtimeEvent realtimeEvent) {
+ // only supply PipeHeartbeatEvent
+ if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
+ return supplyHeartbeat(realtimeEvent);
+ } else if (realtimeEvent.getEvent() instanceof ProgressReportEvent) {
+ return supplyDirectly(realtimeEvent);
}
-
return null;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java
index c9d1616..5e086f7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java
@@ -22,7 +22,6 @@
import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.event.ProgressReportEvent;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
@@ -31,12 +30,9 @@
import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
-import org.apache.iotdb.db.storageengine.StorageEngine;
-import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager;
import org.apache.iotdb.pipe.api.event.Event;
import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
@@ -145,6 +141,8 @@
}
private void extractTsFileInsertion(final PipeRealtimeEvent event) {
+ // Notice that, if the tsFile is only partially extracted because the pipe was not open before,
+ // the former data won't be extracted
event
.getTsFileEpoch()
.migrateState(
@@ -152,34 +150,13 @@
state -> {
switch (state) {
case EMPTY:
+ return ((PipeTsFileInsertionEvent) event.getEvent()).isLoaded()
+ ? TsFileEpoch.State.USING_TSFILE
+ : TsFileEpoch.State.USING_TABLET;
+ case USING_TABLET:
+ return TsFileEpoch.State.USING_TABLET;
case USING_TSFILE:
return TsFileEpoch.State.USING_TSFILE;
- case USING_TABLET:
- if (((PipeTsFileInsertionEvent) event.getEvent()).getFileStartTime()
- < event.getTsFileEpoch().getInsertNodeMinTime()) {
- // Some insert nodes in the tsfile epoch are not captured by pipe, so we should
- // capture the tsfile event to make sure all data in the tsfile epoch can be
- // extracted.
- //
- // The situation can be caused by the following operations:
- // 1. PipeA: start historical data extraction with flush
- // 2. Data insertion
- // 3. PipeB: start realtime data extraction
- // 4. PipeB: start historical data extraction without flush
- // 5. Data inserted in the step2 is not captured by PipeB, and if its tsfile
- // epoch's state is USING_TABLET, the tsfile event will be ignored, which
- // will cause the data loss in the tsfile epoch.
- LOGGER.info(
- "The tsFile {}'s epoch's start time {} is smaller than the captured insertNodes' min time {}, will regard it as data loss or un-sequential, will extract the tsFile",
- ((PipeTsFileInsertionEvent) event.getEvent()).getTsFile(),
- ((PipeTsFileInsertionEvent) event.getEvent()).getFileStartTime(),
- event.getTsFileEpoch().getInsertNodeMinTime());
- return TsFileEpoch.State.USING_BOTH;
- } else {
- // All data in the tsfile epoch has been extracted in tablet mode, so we should
- // simply keep the state of the tsfile epoch and discard the tsfile event.
- return TsFileEpoch.State.USING_TABLET;
- }
case USING_BOTH:
default:
return canNotUseTabletAnyMore(event)
@@ -225,51 +202,13 @@
private boolean canNotUseTabletAnyMore(final PipeRealtimeEvent event) {
// In the following 4 cases, we should not extract this tablet event. all the data
// represented by the tablet event should be carried by the following tsfile event:
- // 0. If the remaining insert event count is too large, we need to reduce the accumulated
- // tablets.
- // 1. If Wal size > maximum size of wal buffer,
// the write operation will be throttled, so we should not extract any more tablet events.
- // 2. The shallow memory usage of the insert node has reached the dangerous threshold.
- // 3. Deprecated logics (unused by default)
- return mayRemainingInsertNodeEventExceedLimit(event)
- || mayWalSizeReachThrottleThreshold(event)
- || mayInsertNodeMemoryReachDangerousThreshold(event)
+ // 1. The shallow memory usage of the insert node has reached the dangerous threshold.
+ // 2. Deprecated logics (unused by default)
+ return mayInsertNodeMemoryReachDangerousThreshold(event)
|| canNotUseTabletAnymoreDeprecated(event);
}
- private boolean mayRemainingInsertNodeEventExceedLimit(final PipeRealtimeEvent event) {
- final boolean mayRemainingInsertEventExceedLimit =
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
- .mayRemainingInsertEventExceedLimit(pipeID);
- if (mayRemainingInsertEventExceedLimit && event.mayExtractorUseTablets(this)) {
- logByLogManager(
- l ->
- l.info(
- "Pipe task {}@{} canNotUseTabletAnyMore(0): remaining insert event has reached max allowed insert event count {}",
- pipeName,
- dataRegionId,
- PipeConfig.getInstance().getPipeMaxAllowedRemainingInsertEventCountPerPipe()));
- }
- return mayRemainingInsertEventExceedLimit;
- }
-
- private boolean mayWalSizeReachThrottleThreshold(final PipeRealtimeEvent event) {
- final boolean mayWalSizeReachThrottleThreshold =
- 3 * WALManager.getInstance().getTotalDiskUsage()
- > IoTDBDescriptor.getInstance().getConfig().getThrottleThreshold();
- if (mayWalSizeReachThrottleThreshold && event.mayExtractorUseTablets(this)) {
- logByLogManager(
- l ->
- l.info(
- "Pipe task {}@{} canNotUseTabletAnyMore(1): Wal size {} has reached throttle threshold {}",
- pipeName,
- dataRegionId,
- WALManager.getInstance().getTotalDiskUsage(),
- IoTDBDescriptor.getInstance().getConfig().getThrottleThreshold() / 3.0d));
- }
- return mayWalSizeReachThrottleThreshold;
- }
-
private boolean mayInsertNodeMemoryReachDangerousThreshold(final PipeRealtimeEvent event) {
final long floatingMemoryUsageInByte =
PipeDataNodeAgent.task().getFloatingMemoryUsageInByte(pipeName);
@@ -277,16 +216,16 @@
final long totalFloatingMemorySizeInBytes =
PipeMemoryManager.getTotalFloatingMemorySizeInBytes();
final boolean mayInsertNodeMemoryReachDangerousThreshold =
- 3 * floatingMemoryUsageInByte * pipeCount >= 2 * totalFloatingMemorySizeInBytes;
+ floatingMemoryUsageInByte * pipeCount >= totalFloatingMemorySizeInBytes;
if (mayInsertNodeMemoryReachDangerousThreshold && event.mayExtractorUseTablets(this)) {
logByLogManager(
l ->
l.info(
- "Pipe task {}@{} canNotUseTabletAnyMore(2): The shallow memory usage of the insert node {} has reached the dangerous threshold {}",
+ "Pipe task {}@{} canNotUseTabletAnyMore(2): The memory usage of the insert node {} has reached the dangerous threshold {}",
pipeName,
dataRegionId,
floatingMemoryUsageInByte * pipeCount,
- 2 * totalFloatingMemorySizeInBytes / 3.0d));
+ totalFloatingMemorySizeInBytes));
}
return mayInsertNodeMemoryReachDangerousThreshold;
}
@@ -305,54 +244,14 @@
private boolean canNotUseTabletAnymoreDeprecated(final PipeRealtimeEvent event) {
// In the following 5 cases, we should not extract any more tablet events. all the data
// represented by the tablet events should be carried by the following tsfile event:
- // 0. If the pipe task is currently restarted.
- // 1. The number of pinned memTables has reached the dangerous threshold.
- // 2. The number of historical tsFile events to transfer has exceeded the limit.
- // 3. The number of realtime tsfile events to transfer has exceeded the limit.
- // 4. The number of linked tsFiles has reached the dangerous threshold.
- return isPipeTaskCurrentlyRestarted(event)
- || mayMemTablePinnedCountReachDangerousThreshold(event)
- || isHistoricalTsFileEventCountExceededLimit(event)
+ // 1. The number of historical tsFile events to transfer has exceeded the limit.
+ // 2. The number of realtime tsfile events to transfer has exceeded the limit.
+ // 3. The number of linked tsFiles has reached the dangerous threshold.
+ return isHistoricalTsFileEventCountExceededLimit(event)
|| isRealtimeTsFileEventCountExceededLimit(event)
|| mayTsFileLinkedCountReachDangerousThreshold(event);
}
- private boolean isPipeTaskCurrentlyRestarted(final PipeRealtimeEvent event) {
- if (!PipeConfig.getInstance().isPipeEpochKeepTsFileAfterStuckRestartEnabled()) {
- return false;
- }
-
- final boolean isPipeTaskCurrentlyRestarted =
- PipeDataNodeAgent.task().isPipeTaskCurrentlyRestarted(pipeName);
- if (isPipeTaskCurrentlyRestarted && event.mayExtractorUseTablets(this)) {
- LOGGER.info(
- "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(0): Pipe task is currently restarted",
- pipeName,
- dataRegionId);
- }
- return isPipeTaskCurrentlyRestarted;
- }
-
- private boolean mayMemTablePinnedCountReachDangerousThreshold(final PipeRealtimeEvent event) {
- if (PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() == Integer.MAX_VALUE) {
- return false;
- }
- final boolean mayMemTablePinnedCountReachDangerousThreshold =
- PipeDataNodeResourceManager.wal().getPinnedWalCount()
- >= PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount()
- * StorageEngine.getInstance().getDataRegionNumber();
- if (mayMemTablePinnedCountReachDangerousThreshold && event.mayExtractorUseTablets(this)) {
- LOGGER.info(
- "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(1): The number of pinned memTables {} has reached the dangerous threshold {}",
- pipeName,
- dataRegionId,
- PipeDataNodeResourceManager.wal().getPinnedWalCount(),
- PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount()
- * StorageEngine.getInstance().getDataRegionNumber());
- }
- return mayMemTablePinnedCountReachDangerousThreshold;
- }
-
private boolean isHistoricalTsFileEventCountExceededLimit(final PipeRealtimeEvent event) {
if (PipeConfig.getInstance().getPipeMaxAllowedHistoricalTsFilePerDataRegion()
== Integer.MAX_VALUE) {
@@ -399,56 +298,38 @@
return false;
}
final boolean mayTsFileLinkedCountReachDangerousThreshold =
- PipeDataNodeResourceManager.tsfile().getLinkedTsfileCount()
+ PipeDataNodeResourceManager.tsfile().getLinkedTsFileCount(pipeName)
>= PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount();
if (mayTsFileLinkedCountReachDangerousThreshold && event.mayExtractorUseTablets(this)) {
LOGGER.info(
"Pipe task {}@{} canNotUseTabletAnymoreDeprecated(4): The number of linked tsFiles {} has reached the dangerous threshold {}",
pipeName,
dataRegionId,
- PipeDataNodeResourceManager.tsfile().getLinkedTsfileCount(),
+ PipeDataNodeResourceManager.tsfile().getLinkedTsFileCount(pipeName),
PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount());
}
return mayTsFileLinkedCountReachDangerousThreshold;
}
@Override
- public Event supply() {
- PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
-
- while (realtimeEvent != null) {
- final Event suppliedEvent;
-
- // Used to judge the type of the event, not directly for supplying.
- final Event eventToSupply = realtimeEvent.getEvent();
- if (eventToSupply instanceof TabletInsertionEvent) {
- suppliedEvent = supplyTabletInsertion(realtimeEvent);
- } else if (eventToSupply instanceof TsFileInsertionEvent) {
- suppliedEvent = supplyTsFileInsertion(realtimeEvent);
- } else if (eventToSupply instanceof PipeHeartbeatEvent) {
- suppliedEvent = supplyHeartbeat(realtimeEvent);
- } else if (eventToSupply instanceof PipeSchemaRegionWritePlanEvent
- || eventToSupply instanceof ProgressReportEvent) {
- suppliedEvent = supplyDirectly(realtimeEvent);
- } else {
- throw new UnsupportedOperationException(
- String.format(
- "Unsupported event type %s for hybrid realtime extractor %s to supply.",
- eventToSupply.getClass(), this));
- }
-
- realtimeEvent.decreaseReferenceCount(
- PipeRealtimeDataRegionHybridExtractor.class.getName(), false);
-
- if (suppliedEvent != null) {
- return suppliedEvent;
- }
-
- realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+ protected Event doSupply(final PipeRealtimeEvent realtimeEvent) {
+ // Used to judge the type of the event, not directly for supplying.
+ final Event eventToSupply = realtimeEvent.getEvent();
+ if (eventToSupply instanceof TabletInsertionEvent) {
+ return supplyTabletInsertion(realtimeEvent);
+ } else if (eventToSupply instanceof TsFileInsertionEvent) {
+ return supplyTsFileInsertion(realtimeEvent);
+ } else if (eventToSupply instanceof PipeHeartbeatEvent) {
+ return supplyHeartbeat(realtimeEvent);
+ } else if (eventToSupply instanceof PipeSchemaRegionWritePlanEvent
+ || eventToSupply instanceof ProgressReportEvent) {
+ return supplyDirectly(realtimeEvent);
+ } else {
+ throw new UnsupportedOperationException(
+ String.format(
+ "Unsupported event type %s for hybrid realtime extractor %s to supply.",
+ eventToSupply.getClass(), this));
}
-
- // Means the pending queue is empty.
- return null;
}
private Event supplyTabletInsertion(final PipeRealtimeEvent event) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java
index 4b30035..74d5c37 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java
@@ -82,10 +82,7 @@
private void extractTsFileInsertion(PipeRealtimeEvent event) {
final PipeTsFileInsertionEvent tsFileInsertionEvent =
(PipeTsFileInsertionEvent) event.getEvent();
- if (!(tsFileInsertionEvent.isLoaded()
- // some insert nodes in the tsfile epoch are not captured by pipe
- || tsFileInsertionEvent.getFileStartTime()
- < event.getTsFileEpoch().getInsertNodeMinTime())) {
+ if (!tsFileInsertionEvent.isLoaded()) {
// All data in the tsfile epoch has been extracted in tablet mode, so we should
// simply ignore this event.
event.decreaseReferenceCount(PipeRealtimeDataRegionLogExtractor.class.getName(), false);
@@ -123,46 +120,29 @@
}
@Override
- public Event supply() {
- PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
-
- while (realtimeEvent != null) {
- Event suppliedEvent = null;
-
- if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
- suppliedEvent = supplyHeartbeat(realtimeEvent);
- } else if (realtimeEvent.getEvent() instanceof PipeSchemaRegionWritePlanEvent
- || realtimeEvent.getEvent() instanceof ProgressReportEvent) {
- suppliedEvent = supplyDirectly(realtimeEvent);
- } else if (realtimeEvent.increaseReferenceCount(
- PipeRealtimeDataRegionLogExtractor.class.getName())) {
- suppliedEvent = realtimeEvent.getEvent();
- } else {
- // if the event's reference count can not be increased, it means the data represented by
- // this event is not reliable anymore. the data has been lost. we simply discard this event
- // and report the exception to PipeRuntimeAgent.
- final String errorMessage =
- String.format(
- "Event %s can not be supplied because "
- + "the reference count can not be increased, "
- + "the data represented by this event is lost",
- realtimeEvent.getEvent());
- LOGGER.error(errorMessage);
- PipeDataNodeAgent.runtime()
- .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage));
- }
-
- realtimeEvent.decreaseReferenceCount(
- PipeRealtimeDataRegionLogExtractor.class.getName(), false);
-
- if (suppliedEvent != null) {
- return suppliedEvent;
- }
-
- realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+ protected Event doSupply(final PipeRealtimeEvent realtimeEvent) {
+ if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
+ return supplyHeartbeat(realtimeEvent);
+ } else if (realtimeEvent.getEvent() instanceof PipeSchemaRegionWritePlanEvent
+ || realtimeEvent.getEvent() instanceof ProgressReportEvent) {
+ return supplyDirectly(realtimeEvent);
+ } else if (realtimeEvent.increaseReferenceCount(
+ PipeRealtimeDataRegionLogExtractor.class.getName())) {
+ return realtimeEvent.getEvent();
+ } else {
+ // if the event's reference count can not be increased, it means the data represented by
+ // this event is not reliable anymore. the data has been lost. we simply discard this event
+ // and report the exception to PipeRuntimeAgent.
+ final String errorMessage =
+ String.format(
+ "Event %s can not be supplied because "
+ + "the reference count can not be increased, "
+ + "the data represented by this event is lost",
+ realtimeEvent.getEvent());
+ LOGGER.error(errorMessage);
+ PipeDataNodeAgent.runtime()
+ .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage));
}
-
- // means the pending queue is empty.
return null;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java
index 8072499..d881ec4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java
@@ -84,46 +84,30 @@
}
@Override
- public Event supply() {
- PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
-
- while (realtimeEvent != null) {
- Event suppliedEvent = null;
-
- if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
- suppliedEvent = supplyHeartbeat(realtimeEvent);
- } else if (realtimeEvent.getEvent() instanceof PipeSchemaRegionWritePlanEvent
- || realtimeEvent.getEvent() instanceof ProgressReportEvent) {
- suppliedEvent = supplyDirectly(realtimeEvent);
- } else if (realtimeEvent.increaseReferenceCount(
- PipeRealtimeDataRegionTsFileExtractor.class.getName())) {
- suppliedEvent = realtimeEvent.getEvent();
- } else {
- // if the event's reference count can not be increased, it means the data represented by
- // this event is not reliable anymore. the data has been lost. we simply discard this event
- // and report the exception to PipeRuntimeAgent.
- final String errorMessage =
- String.format(
- "Event %s can not be supplied because "
- + "the reference count can not be increased, "
- + "the data represented by this event is lost",
- realtimeEvent.getEvent());
- LOGGER.error(errorMessage);
- PipeDataNodeAgent.runtime()
- .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage));
- }
-
- realtimeEvent.decreaseReferenceCount(
- PipeRealtimeDataRegionTsFileExtractor.class.getName(), false);
-
- if (suppliedEvent != null) {
- return suppliedEvent;
- }
-
- realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll();
+ protected Event doSupply(final PipeRealtimeEvent realtimeEvent) {
+ if (realtimeEvent.getEvent() instanceof PipeHeartbeatEvent) {
+ return supplyHeartbeat(realtimeEvent);
+ } else if (realtimeEvent.getEvent() instanceof PipeSchemaRegionWritePlanEvent
+ || realtimeEvent.getEvent() instanceof ProgressReportEvent) {
+ return supplyDirectly(realtimeEvent);
+ } else if (realtimeEvent.increaseReferenceCount(
+ PipeRealtimeDataRegionTsFileExtractor.class.getName())) {
+ return realtimeEvent.getEvent();
+ } else {
+ // if the event's reference count can not be increased, it means the data represented by
+ // this event is not reliable anymore. the data has been lost. we simply discard this event
+ // and report the exception to PipeRuntimeAgent.
+ final String errorMessage =
+ String.format(
+ "Event %s can not be supplied because "
+ + "the reference count can not be increased, "
+ + "the data represented by this event is lost",
+ realtimeEvent.getEvent());
+ LOGGER.error(errorMessage);
+ PipeDataNodeAgent.runtime()
+ .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage));
}
- // means the pending queue is empty.
return null;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java
index ed91f63..0ba9906 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java
@@ -21,171 +21,96 @@
import org.apache.iotdb.commons.consensus.index.ProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
import org.apache.iotdb.commons.pipe.event.ProgressReportEvent;
-import org.apache.iotdb.commons.pipe.metric.PipeEventCounter;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEventFactory;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher.CachedSchemaPatternMatcher;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher.PipeDataRegionMatcher;
-import org.apache.iotdb.db.pipe.metric.source.PipeAssignerMetrics;
-import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter;
-import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.atomic.AtomicReference;
public class PipeDataRegionAssigner implements Closeable {
private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataRegionAssigner.class);
- private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance();
-
- /**
- * The {@link PipeDataRegionMatcher} is used to match the event with the extractor based on the
- * pattern.
- */
- private final PipeDataRegionMatcher matcher;
-
- /** The {@link DisruptorQueue} is used to assign the event to the extractor. */
- private final DisruptorQueue disruptor;
-
private final String dataRegionId;
-
- private int counter = 0;
+ protected final Set<PipeRealtimeDataRegionExtractor> extractors = new CopyOnWriteArraySet<>();
private final AtomicReference<ProgressIndex> maxProgressIndexForRealtimeEvent =
new AtomicReference<>(MinimumProgressIndex.INSTANCE);
- private final PipeEventCounter eventCounter = new PipeDataRegionEventCounter();
-
public String getDataRegionId() {
return dataRegionId;
}
public PipeDataRegionAssigner(final String dataRegionId) {
- this.matcher = new CachedSchemaPatternMatcher();
- this.disruptor = new DisruptorQueue(this::assignToExtractor, this::onAssignedHook);
this.dataRegionId = dataRegionId;
- PipeAssignerMetrics.getInstance().register(this);
}
- public void publishToAssign(final PipeRealtimeEvent event) {
- if (!event.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
- LOGGER.warn(
- "The reference count of the realtime event {} cannot be increased, skipping it.", event);
- return;
+ public void assignToExtractor(final PipeRealtimeEvent event) {
+ if (event.getEvent() instanceof PipeHeartbeatEvent) {
+ ((PipeHeartbeatEvent) event.getEvent()).onPublished();
}
- final EnrichedEvent innerEvent = event.getEvent();
- eventCounter.increaseEventCount(innerEvent);
- if (innerEvent instanceof PipeHeartbeatEvent) {
- ((PipeHeartbeatEvent) innerEvent).onPublished();
- }
+ extractors.forEach(
+ extractor -> {
+ if (event.getEvent().isGeneratedByPipe() && !extractor.isForwardingPipeRequests()) {
+ final ProgressReportEvent reportEvent =
+ new ProgressReportEvent(
+ extractor.getPipeName(),
+ extractor.getCreationTime(),
+ extractor.getPipeTaskMeta(),
+ extractor.getPipePattern(),
+ extractor.getRealtimeDataExtractionStartTime(),
+ extractor.getRealtimeDataExtractionEndTime());
+ reportEvent.bindProgressIndex(event.getProgressIndex());
+ if (!reportEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
+ LOGGER.warn(
+ "The reference count of the event {} cannot be increased, skipping it.",
+ reportEvent);
+ return;
+ }
+ extractor.extract(PipeRealtimeEventFactory.createRealtimeEvent(reportEvent));
+ return;
+ }
- // use synchronized here for completely preventing reference count leaks under extreme thread
- // scheduling when closing
- synchronized (this) {
- if (!disruptor.isClosed()) {
- disruptor.publish(event);
- } else {
- onAssignedHook(event);
- }
- }
- }
+ final PipeRealtimeEvent copiedEvent =
+ event.shallowCopySelfAndBindPipeTaskMetaForProgressReport(
+ extractor.getPipeName(),
+ extractor.getCreationTime(),
+ extractor.getPipeTaskMeta(),
+ extractor.getPipePattern(),
+ extractor.getRealtimeDataExtractionStartTime(),
+ extractor.getRealtimeDataExtractionEndTime());
+ final EnrichedEvent innerEvent = copiedEvent.getEvent();
+ if (innerEvent instanceof PipeTsFileInsertionEvent) {
+ final PipeTsFileInsertionEvent tsFileInsertionEvent =
+ (PipeTsFileInsertionEvent) innerEvent;
+ tsFileInsertionEvent.disableMod4NonTransferPipes(extractor.isShouldTransferModFile());
+ }
- private void onAssignedHook(final PipeRealtimeEvent realtimeEvent) {
- realtimeEvent.gcSchemaInfo();
- realtimeEvent.decreaseReferenceCount(PipeDataRegionAssigner.class.getName(), false);
+ if (innerEvent instanceof PipeTsFileInsertionEvent
+ || innerEvent instanceof PipeInsertNodeTabletInsertionEvent) {
+ bindOrUpdateProgressIndexForRealtimeEvent(copiedEvent);
+ }
- final EnrichedEvent innerEvent = realtimeEvent.getEvent();
- eventCounter.decreaseEventCount(innerEvent);
- if (innerEvent instanceof PipeHeartbeatEvent) {
- ((PipeHeartbeatEvent) innerEvent).onAssigned();
- }
- }
-
- private void assignToExtractor(
- final PipeRealtimeEvent event, final long sequence, final boolean endOfBatch) {
- if (disruptor.isClosed()) {
- return;
- }
-
- matcher
- .match(event)
- .forEach(
- extractor -> {
- if (disruptor.isClosed()) {
- return;
- }
-
- if (event.getEvent().isGeneratedByPipe() && !extractor.isForwardingPipeRequests()) {
- // The frequency of progress reports is limited by the counter, while progress
- // reports to TsFileInsertionEvent are not limited.
- if (!(event.getEvent() instanceof TsFileInsertionEvent)) {
- if (counter < PIPE_CONFIG.getPipeNonForwardingEventsProgressReportInterval()) {
- counter++;
- return;
- }
- counter = 0;
- }
-
- final ProgressReportEvent reportEvent =
- new ProgressReportEvent(
- extractor.getPipeName(),
- extractor.getCreationTime(),
- extractor.getPipeTaskMeta(),
- extractor.getPipePattern(),
- extractor.getRealtimeDataExtractionStartTime(),
- extractor.getRealtimeDataExtractionEndTime());
- reportEvent.bindProgressIndex(event.getProgressIndex());
- if (!reportEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
- LOGGER.warn(
- "The reference count of the event {} cannot be increased, skipping it.",
- reportEvent);
- return;
- }
- extractor.extract(PipeRealtimeEventFactory.createRealtimeEvent(reportEvent));
- return;
- }
-
- final PipeRealtimeEvent copiedEvent =
- event.shallowCopySelfAndBindPipeTaskMetaForProgressReport(
- extractor.getPipeName(),
- extractor.getCreationTime(),
- extractor.getPipeTaskMeta(),
- extractor.getPipePattern(),
- extractor.getRealtimeDataExtractionStartTime(),
- extractor.getRealtimeDataExtractionEndTime());
- final EnrichedEvent innerEvent = copiedEvent.getEvent();
- if (innerEvent instanceof PipeTsFileInsertionEvent) {
- final PipeTsFileInsertionEvent tsFileInsertionEvent =
- (PipeTsFileInsertionEvent) innerEvent;
- tsFileInsertionEvent.disableMod4NonTransferPipes(
- extractor.isShouldTransferModFile());
- }
-
- if (innerEvent instanceof PipeTsFileInsertionEvent
- || innerEvent instanceof PipeInsertNodeTabletInsertionEvent) {
- bindOrUpdateProgressIndexForRealtimeEvent(copiedEvent);
- }
-
- if (!copiedEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
- LOGGER.warn(
- "The reference count of the event {} cannot be increased, skipping it.",
- copiedEvent);
- return;
- }
- extractor.extract(copiedEvent);
- });
+ if (!copiedEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) {
+ LOGGER.warn(
+ "The reference count of the event {} cannot be increased, skipping it.",
+ copiedEvent);
+ return;
+ }
+ extractor.extract(copiedEvent);
+ });
}
private void bindOrUpdateProgressIndexForRealtimeEvent(final PipeRealtimeEvent event) {
@@ -217,45 +142,26 @@
}
public void startAssignTo(final PipeRealtimeDataRegionExtractor extractor) {
- matcher.register(extractor);
+ extractors.add(extractor);
}
public void stopAssignTo(final PipeRealtimeDataRegionExtractor extractor) {
- matcher.deregister(extractor);
+ extractors.remove(extractor);
}
public boolean notMoreExtractorNeededToBeAssigned() {
- return matcher.getRegisterCount() == 0;
+ return extractors.isEmpty();
}
- /**
- * Clear the matcher and disruptor. The method {@link PipeDataRegionAssigner#publishToAssign}
- * should not be used after calling this method.
- */
@Override
// use synchronized here for completely preventing reference count leaks under extreme thread
// scheduling when closing
public synchronized void close() {
- PipeAssignerMetrics.getInstance().deregister(dataRegionId);
-
final long startTime = System.currentTimeMillis();
- disruptor.shutdown();
- matcher.clear();
+ extractors.clear();
LOGGER.info(
"Pipe: Assigner on data region {} shutdown internal disruptor within {} ms",
dataRegionId,
System.currentTimeMillis() - startTime);
}
-
- public int getTabletInsertionEventCount() {
- return eventCounter.getTabletInsertionEventCount();
- }
-
- public int getTsFileInsertionEventCount() {
- return eventCounter.getTsFileInsertionEventCount();
- }
-
- public int getPipeHeartbeatEventCount() {
- return eventCounter.getPipeHeartbeatEventCount();
- }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java
index 452c718..69a276b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java
@@ -27,7 +27,6 @@
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
@@ -111,15 +110,12 @@
return;
}
- assigner.publishToAssign(
+ assigner.assignToExtractor(
PipeRealtimeEventFactory.createRealtimeEvent(tsFileResource, isLoaded));
}
public void listenToInsertNode(
- String dataRegionId,
- WALEntryHandler walEntryHandler,
- InsertNode insertNode,
- TsFileResource tsFileResource) {
+ String dataRegionId, InsertNode insertNode, TsFileResource tsFileResource) {
if (listenToInsertNodeExtractorCount.get() == 0) {
return;
}
@@ -131,20 +127,21 @@
return;
}
- assigner.publishToAssign(
- PipeRealtimeEventFactory.createRealtimeEvent(walEntryHandler, insertNode, tsFileResource));
+ assigner.assignToExtractor(
+ PipeRealtimeEventFactory.createRealtimeEvent(insertNode, tsFileResource));
}
public void listenToHeartbeat(boolean shouldPrintMessage) {
dataRegionId2Assigner.forEach(
(key, value) ->
- value.publishToAssign(
+ value.assignToExtractor(
PipeRealtimeEventFactory.createRealtimeEvent(key, shouldPrintMessage)));
}
public void listenToDeleteData(DeleteDataNode node) {
dataRegionId2Assigner.forEach(
- (key, value) -> value.publishToAssign(PipeRealtimeEventFactory.createRealtimeEvent(node)));
+ (key, value) ->
+ value.assignToExtractor(PipeRealtimeEventFactory.createRealtimeEvent(node)));
}
/////////////////////////////// singleton ///////////////////////////////
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
index 015ece3..1e4e8d4 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/CachedSchemaPatternMatcher.java
@@ -19,190 +19,84 @@
package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
-import com.github.benmanes.caffeine.cache.Cache;
-import com.github.benmanes.caffeine.cache.Caffeine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-public class CachedSchemaPatternMatcher implements PipeDataRegionMatcher {
+public class CachedSchemaPatternMatcher {
protected static final Logger LOGGER = LoggerFactory.getLogger(CachedSchemaPatternMatcher.class);
- protected final ReentrantReadWriteLock lock;
-
- protected final Set<PipeRealtimeDataRegionExtractor> extractors;
- protected final Cache<String, Set<PipeRealtimeDataRegionExtractor>> deviceToExtractorsCache;
-
- public CachedSchemaPatternMatcher() {
- this.lock = new ReentrantReadWriteLock();
- // Should be thread-safe because the extractors will be returned by {@link #match} and
- // iterated by {@link #assignToExtractor}, at the same time the extractors may be added or
- // removed by {@link #register} and {@link #deregister}.
- this.extractors = new CopyOnWriteArraySet<>();
- this.deviceToExtractorsCache =
- Caffeine.newBuilder()
- .maximumSize(PipeConfig.getInstance().getPipeExtractorMatcherCacheSize())
- .build();
- }
-
- @Override
- public void register(final PipeRealtimeDataRegionExtractor extractor) {
- lock.writeLock().lock();
- try {
- extractors.add(extractor);
- deviceToExtractorsCache.invalidateAll();
- } finally {
- lock.writeLock().unlock();
+ public static boolean match(
+ final PipeRealtimeEvent event, final PipeRealtimeDataRegionExtractor extractor) {
+ // HeartbeatEvent will be assigned to all extractors
+ if (event.getEvent() instanceof PipeHeartbeatEvent) {
+ return true;
}
- }
- @Override
- public void deregister(final PipeRealtimeDataRegionExtractor extractor) {
- lock.writeLock().lock();
- try {
- extractors.remove(extractor);
- deviceToExtractorsCache.invalidateAll();
- } finally {
- lock.writeLock().unlock();
+ // Deletion event will be assigned to extractors that listen to it
+ if (event.getEvent() instanceof PipeSchemaRegionWritePlanEvent) {
+ return extractor.shouldExtractDeletion();
}
- }
- @Override
- public int getRegisterCount() {
- lock.readLock().lock();
- try {
- return extractors.size();
- } finally {
- lock.readLock().unlock();
- }
- }
+ for (final Map.Entry<String, String[]> entry : event.getSchemaInfo().entrySet()) {
+ final String device = entry.getKey();
+ final String[] measurements = entry.getValue();
- @Override
- public Set<PipeRealtimeDataRegionExtractor> match(final PipeRealtimeEvent event) {
- final Set<PipeRealtimeDataRegionExtractor> matchedExtractors = new HashSet<>();
-
- lock.readLock().lock();
- try {
- if (extractors.isEmpty()) {
- return matchedExtractors;
+ if (!filterByDevice(device, extractor)) {
+ return false;
}
- // HeartbeatEvent will be assigned to all extractors
- if (event.getEvent() instanceof PipeHeartbeatEvent) {
- return extractors;
- }
-
- // Deletion event will be assigned to extractors listened to it
- if (event.getEvent() instanceof PipeSchemaRegionWritePlanEvent) {
- return extractors.stream()
- .filter(PipeRealtimeDataRegionExtractor::shouldExtractDeletion)
- .collect(Collectors.toSet());
- }
-
- for (final Map.Entry<String, String[]> entry : event.getSchemaInfo().entrySet()) {
- final String device = entry.getKey();
- final String[] measurements = entry.getValue();
-
- // 1. try to get matched extractors from cache, if not success, match them by device
- final Set<PipeRealtimeDataRegionExtractor> extractorsFilteredByDevice =
- deviceToExtractorsCache.get(device, this::filterExtractorsByDevice);
- // this would not happen
- if (extractorsFilteredByDevice == null) {
- LOGGER.warn("Match result NPE when handle device {}", device);
- continue;
- }
-
- // 2. filter matched candidate extractors by measurements
- if (measurements.length == 0) {
- // `measurements` is empty (only in case of tsfile event). match all extractors.
- //
- // case 1: the pattern can match all measurements of the device.
- // in this case, the extractor can be matched without checking the measurements.
- //
- // case 2: the pattern may match some measurements of the device.
- // in this case, we can't get all measurements efficiently here,
- // so we just ASSUME the extractor matches and do more checks later.
- matchedExtractors.addAll(extractorsFilteredByDevice);
+ // Filter the candidate extractor by measurements
+ if (measurements.length == 0) {
+ // `measurements` is empty (only in case of tsfile event). match all extractors.
+ //
+ // case 1: the pattern can match all measurements of the device.
+ // in this case, the extractor can be matched without checking the measurements.
+ //
+ // case 2: the pattern may match some measurements of the device.
+ // in this case, we can't get all measurements efficiently here,
+ // so we just ASSUME the extractor matches and do more checks later.
+ return true;
+ } else {
+ final PipePattern pattern = extractor.getPipePattern();
+ if (Objects.isNull(pattern) || pattern.isRoot() || pattern.coversDevice(device)) {
+ // The pattern can match all measurements of the device.
+ return true;
} else {
- // `measurements` is not empty (only in case of tablet event).
- // Match extractors by measurements.
- extractorsFilteredByDevice.forEach(
- extractor -> {
- final PipePattern pattern = extractor.getPipePattern();
- if (Objects.isNull(pattern) || pattern.isRoot() || pattern.coversDevice(device)) {
- // The pattern can match all measurements of the device.
- matchedExtractors.add(extractor);
- } else {
- for (final String measurement : measurements) {
- // Ignore null measurement for partial insert
- if (measurement == null) {
- continue;
- }
+ for (final String measurement : measurements) {
+ // Ignore null measurement for partial insert
+ if (measurement == null) {
+ continue;
+ }
- if (pattern.matchesMeasurement(device, measurement)) {
- matchedExtractors.add(extractor);
- // There would be no more matched extractors because the measurements are
- // unique
- break;
- }
- }
- }
- });
- }
-
- if (matchedExtractors.size() == extractors.size()) {
- break;
+ if (pattern.matchesMeasurement(device, measurement)) {
+ return true;
+ }
+ }
}
}
- } finally {
- lock.readLock().unlock();
}
- return matchedExtractors;
+ return false;
}
- protected Set<PipeRealtimeDataRegionExtractor> filterExtractorsByDevice(final String device) {
- final Set<PipeRealtimeDataRegionExtractor> filteredExtractors = new HashSet<>();
-
- for (final PipeRealtimeDataRegionExtractor extractor : extractors) {
- // Return if the extractor only extract deletion
- if (!extractor.shouldExtractInsertion()) {
- continue;
- }
-
- final PipePattern pipePattern = extractor.getPipePattern();
- if (Objects.isNull(pipePattern) || pipePattern.mayOverlapWithDevice(device)) {
- filteredExtractors.add(extractor);
- }
- }
-
- return filteredExtractors;
+ private static boolean filterByDevice(
+ final String device, final PipeRealtimeDataRegionExtractor extractor) {
+ return extractor.shouldExtractInsertion()
+ && (Objects.isNull(extractor.getPipePattern())
+ || extractor.getPipePattern().mayOverlapWithDevice(device));
}
- @Override
- public void clear() {
- lock.writeLock().lock();
- try {
- extractors.clear();
- deviceToExtractorsCache.invalidateAll();
- deviceToExtractorsCache.cleanUp();
- } finally {
- lock.writeLock().unlock();
- }
+ private CachedSchemaPatternMatcher() {
+ // Utility class
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/PipeDataRegionMatcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/PipeDataRegionMatcher.java
deleted file mode 100644
index 4e102a1..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/matcher/PipeDataRegionMatcher.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher;
-
-import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
-
-import java.util.Set;
-
-public interface PipeDataRegionMatcher {
-
- /**
- * Register a extractor. If the extractor's pattern matches the event's schema info, the event
- * will be assigned to the extractor.
- */
- void register(PipeRealtimeDataRegionExtractor extractor);
-
- /** Deregister a extractor. */
- void deregister(PipeRealtimeDataRegionExtractor extractor);
-
- /** Get the number of registered extractors in this matcher. */
- int getRegisterCount();
-
- /**
- * Match the event's schema info with the registered extractors' patterns. If the event's schema
- * info matches the pattern of a extractor, the extractor will be returned.
- *
- * @param event the event to be matched
- * @return the matched extractors
- */
- Set<PipeRealtimeDataRegionExtractor> match(PipeRealtimeEvent event);
-
- /** Clear all the registered extractors and internal data structures. */
- void clear();
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java
index b115307..92fc237 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/schemaregion/IoTDBSchemaRegionExtractor.java
@@ -32,7 +32,7 @@
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionSnapshotEvent;
import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionExtractorMetrics;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
@@ -75,7 +75,7 @@
listenedTypeSet = SchemaRegionListeningFilter.parseListeningPlanTypeSet(parameters);
PipeSchemaRegionExtractorMetrics.getInstance().register(this);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().register(this);
+ PipeDataNodeSinglePipeMetrics.getInstance().register(this);
}
@Override
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java
index 3f03ce5..f5a48b3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java
@@ -20,18 +20,16 @@
package org.apache.iotdb.db.pipe.metric;
import org.apache.iotdb.commons.pipe.metric.PipeEventCommitMetrics;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeHeartbeatEventMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics;
import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics;
-import org.apache.iotdb.db.pipe.metric.overview.PipeWALInsertNodeCacheMetrics;
import org.apache.iotdb.db.pipe.metric.processor.PipeProcessorMetrics;
import org.apache.iotdb.db.pipe.metric.receiver.PipeDataNodeReceiverMetrics;
import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionConnectorMetrics;
import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionExtractorMetrics;
import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionListenerMetrics;
import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionConnectorMetrics;
-import org.apache.iotdb.db.pipe.metric.source.PipeAssignerMetrics;
import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -42,36 +40,32 @@
@Override
public void bindTo(final AbstractMetricService metricService) {
- PipeAssignerMetrics.getInstance().bindTo(metricService);
PipeDataRegionExtractorMetrics.getInstance().bindTo(metricService);
PipeProcessorMetrics.getInstance().bindTo(metricService);
PipeDataRegionConnectorMetrics.getInstance().bindTo(metricService);
PipeHeartbeatEventMetrics.getInstance().bindTo(metricService);
- PipeWALInsertNodeCacheMetrics.getInstance().bindTo(metricService);
PipeResourceMetrics.getInstance().bindTo(metricService);
PipeEventCommitMetrics.getInstance().bindTo(metricService);
PipeSchemaRegionListenerMetrics.getInstance().bindTo(metricService);
PipeSchemaRegionExtractorMetrics.getInstance().bindTo(metricService);
PipeSchemaRegionConnectorMetrics.getInstance().bindTo(metricService);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().bindTo(metricService);
+ PipeDataNodeSinglePipeMetrics.getInstance().bindTo(metricService);
PipeDataNodeReceiverMetrics.getInstance().bindTo(metricService);
PipeTsFileToTabletsMetrics.getInstance().bindTo(metricService);
}
@Override
public void unbindFrom(final AbstractMetricService metricService) {
- PipeAssignerMetrics.getInstance().unbindFrom(metricService);
PipeDataRegionExtractorMetrics.getInstance().unbindFrom(metricService);
PipeProcessorMetrics.getInstance().unbindFrom(metricService);
PipeDataRegionConnectorMetrics.getInstance().unbindFrom(metricService);
PipeHeartbeatEventMetrics.getInstance().unbindFrom(metricService);
- PipeWALInsertNodeCacheMetrics.getInstance().unbindFrom(metricService);
PipeResourceMetrics.getInstance().unbindFrom(metricService);
PipeEventCommitMetrics.getInstance().unbindFrom(metricService);
PipeSchemaRegionListenerMetrics.getInstance().unbindFrom(metricService);
PipeSchemaRegionExtractorMetrics.getInstance().unbindFrom(metricService);
PipeSchemaRegionConnectorMetrics.getInstance().unbindFrom(metricService);
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance().unbindFrom(metricService);
+ PipeDataNodeSinglePipeMetrics.getInstance().unbindFrom(metricService);
PipeDataNodeReceiverMetrics.getInstance().unbindFrom(metricService);
PipeTsFileToTabletsMetrics.getInstance().unbindFrom(metricService);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java
index 86368acf..87c7c67 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java
@@ -40,7 +40,7 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-class PipeDataNodeRemainingEventAndTimeOperator extends PipeRemainingOperator {
+public class PipeDataNodeRemainingEventAndTimeOperator extends PipeRemainingOperator {
// Calculate from schema region extractors directly for it requires less computation
private final Set<IoTDBSchemaRegionExtractor> schemaRegionExtractors =
@@ -59,8 +59,6 @@
private Timer insertNodeTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
private Timer tsfileTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER;
- private final InsertNodeEMA insertNodeEventCountEMA = new InsertNodeEMA();
-
private double lastDataRegionCommitSmoothingValue = Long.MAX_VALUE;
private double lastSchemaRegionCommitSmoothingValue = Long.MAX_VALUE;
@@ -102,12 +100,23 @@
heartbeatEventCount.decrementAndGet();
}
- double getRemainingInsertEventSmoothingCount() {
- insertNodeEventCountEMA.update(insertNodeEventCount.get());
- return insertNodeEventCountEMA.insertNodeEMAValue;
+ public long getRemainingNonHeartbeatEvents() {
+ final long remainingEvents =
+ tsfileEventCount.get()
+ + rawTabletEventCount.get()
+ + insertNodeEventCount.get()
+ + schemaRegionExtractors.stream()
+ .map(IoTDBSchemaRegionExtractor::getUnTransferredEventCount)
+ .reduce(Long::sum)
+ .orElse(0L);
+
+ // There are cases where the indicator is negative. For example, after the Pipe is restarted,
+ // the Processor SubTask is still collecting Events, resulting in a negative count. This
+ // situation cannot be avoided because the Pipe may be restarted internally.
+ return remainingEvents >= 0 ? remainingEvents : 0;
}
- long getRemainingEvents() {
+ public long getRemainingEvents() {
final long remainingEvents =
tsfileEventCount.get()
+ rawTabletEventCount.get()
@@ -131,7 +140,7 @@
*
* @return The estimated remaining time
*/
- double getRemainingTime() {
+ public double getRemainingTime() {
final PipeRateAverage pipeRemainingTimeCommitRateAverageTime =
PipeConfig.getInstance().getPipeRemainingTimeCommitRateAverageTime();
@@ -266,17 +275,4 @@
dataRegionCommitMeter.set(null);
schemaRegionCommitMeter.set(null);
}
-
- private static class InsertNodeEMA {
- private double insertNodeEMAValue;
-
- public void update(final double newValue) {
- final double alpha = PipeConfig.getInstance().getPipeRemainingInsertNodeCountEMAAlpha();
- if (insertNodeEMAValue == 0) {
- insertNodeEMAValue = newValue;
- } else {
- insertNodeEMAValue = alpha * newValue + (1 - alpha) * insertNodeEMAValue;
- }
- }
- }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
similarity index 81%
rename from iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeMetrics.java
rename to iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
index 354a980..534958e 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java
@@ -20,11 +20,12 @@
package org.apache.iotdb.db.pipe.metric.overview;
import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor;
import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor;
+import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.impl.DoNothingMetricManager;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
@@ -42,15 +43,14 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
-public class PipeDataNodeRemainingEventAndTimeMetrics implements IMetricSet {
+public class PipeDataNodeSinglePipeMetrics implements IMetricSet {
- private static final Logger LOGGER =
- LoggerFactory.getLogger(PipeDataNodeRemainingEventAndTimeMetrics.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeSinglePipeMetrics.class);
@SuppressWarnings("java:S3077")
private volatile AbstractMetricService metricService;
- private final Map<String, PipeDataNodeRemainingEventAndTimeOperator>
+ public final Map<String, PipeDataNodeRemainingEventAndTimeOperator>
remainingEventAndTimeOperatorMap = new ConcurrentHashMap<>();
private static Histogram PIPE_DATANODE_INSERTNODE_TRANSFER_TIME_HISTOGRAM =
@@ -104,6 +104,35 @@
Tag.CREATION_TIME.toString(),
String.valueOf(operator.getCreationTime()));
+ // Resources
+ metricService.createAutoGauge(
+ Metric.PIPE_FLOATING_MEMORY_USAGE.toString(),
+ MetricLevel.IMPORTANT,
+ PipeDataNodeAgent.task(),
+ a -> a.getFloatingMemoryUsageInByte(operator.getPipeName()),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+ metricService.createAutoGauge(
+ Metric.PIPE_LINKED_TSFILE_COUNT.toString(),
+ MetricLevel.IMPORTANT,
+ PipeDataNodeResourceManager.tsfile(),
+ a -> a.getLinkedTsFileCount(operator.getPipeName()),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+ metricService.createAutoGauge(
+ Metric.PIPE_LINKED_TSFILE_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ PipeDataNodeResourceManager.tsfile(),
+ a -> a.getTotalLinkedTsFileSize(operator.getPipeName()),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+
operator.setInsertNodeTransferTimer(
metricService.getOrCreateTimer(
Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(),
@@ -119,31 +148,6 @@
operator.getPipeName()));
}
- public boolean mayRemainingInsertEventExceedLimit(final String pipeID) {
- if (Objects.isNull(metricService)) {
- return true;
- }
-
- if (remainingEventAndTimeOperatorMap.values().stream()
- .map(PipeDataNodeRemainingEventAndTimeOperator::getRemainingInsertEventSmoothingCount)
- .reduce(0d, Double::sum)
- > PipeConfig.getInstance().getPipeMaxAllowedTotalRemainingInsertEventCount()) {
- return true;
- }
-
- final PipeDataNodeRemainingEventAndTimeOperator operator =
- remainingEventAndTimeOperatorMap.get(pipeID);
- if (Objects.isNull(operator)) {
- LOGGER.warn(
- "Failed to get remaining insert event, RemainingEventAndTimeOperator({}) does not exist, will degrade anyway",
- pipeID);
- return true;
- }
-
- return operator.getRemainingInsertEventSmoothingCount()
- > PipeConfig.getInstance().getPipeMaxAllowedRemainingInsertEventCountPerPipe();
- }
-
@Override
public void unbindFrom(final AbstractMetricService metricService) {
ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::deregister);
@@ -186,6 +190,27 @@
Tag.CREATION_TIME.toString(),
String.valueOf(operator.getCreationTime()));
metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PIPE_FLOATING_MEMORY_USAGE.toString(),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PIPE_LINKED_TSFILE_COUNT.toString(),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PIPE_LINKED_TSFILE_SIZE.toString(),
+ Tag.NAME.toString(),
+ operator.getPipeName(),
+ Tag.CREATION_TIME.toString(),
+ String.valueOf(operator.getCreationTime()));
+ metricService.remove(
MetricType.TIMER,
Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(),
Tag.NAME.toString(),
@@ -205,12 +230,13 @@
final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime();
remainingEventAndTimeOperatorMap.computeIfAbsent(
pipeID,
- k ->
- new PipeDataNodeRemainingEventAndTimeOperator(
- extractor.getPipeName(), extractor.getCreationTime()));
- if (Objects.nonNull(metricService)) {
- createMetrics(pipeID);
- }
+ k -> {
+ if (Objects.nonNull(metricService)) {
+ createMetrics(pipeID);
+ }
+ return new PipeDataNodeRemainingEventAndTimeOperator(
+ extractor.getPipeName(), extractor.getCreationTime());
+ });
}
public void register(final IoTDBSchemaRegionExtractor extractor) {
@@ -219,13 +245,14 @@
remainingEventAndTimeOperatorMap
.computeIfAbsent(
pipeID,
- k ->
- new PipeDataNodeRemainingEventAndTimeOperator(
- extractor.getPipeName(), extractor.getCreationTime()))
+ k -> {
+ if (Objects.nonNull(metricService)) {
+ createMetrics(pipeID);
+ }
+ return new PipeDataNodeRemainingEventAndTimeOperator(
+ extractor.getPipeName(), extractor.getCreationTime());
+ })
.register(extractor);
- if (Objects.nonNull(metricService)) {
- createMetrics(pipeID);
- }
}
public void increaseInsertNodeEventCount(final String pipeName, final long creationTime) {
@@ -381,19 +408,19 @@
private static class PipeDataNodeRemainingEventAndTimeMetricsHolder {
- private static final PipeDataNodeRemainingEventAndTimeMetrics INSTANCE =
- new PipeDataNodeRemainingEventAndTimeMetrics();
+ private static final PipeDataNodeSinglePipeMetrics INSTANCE =
+ new PipeDataNodeSinglePipeMetrics();
private PipeDataNodeRemainingEventAndTimeMetricsHolder() {
// Empty constructor
}
}
- public static PipeDataNodeRemainingEventAndTimeMetrics getInstance() {
+ public static PipeDataNodeSinglePipeMetrics getInstance() {
return PipeDataNodeRemainingEventAndTimeMetricsHolder.INSTANCE;
}
- private PipeDataNodeRemainingEventAndTimeMetrics() {
+ private PipeDataNodeSinglePipeMetrics() {
PipeEventCommitManager.getInstance().setCommitRateMarker(this::markRegionCommit);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java
index c911557..37f8eb5 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java
@@ -22,10 +22,9 @@
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager;
import org.apache.iotdb.commons.service.metric.enums.Metric;
import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
-import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
import org.apache.iotdb.metrics.utils.MetricLevel;
@@ -34,6 +33,7 @@
public class PipeResourceMetrics implements IMetricSet {
private static final String PIPE_USED_MEMORY = "PipeUsedMemory";
+ private static final String PIPE_USED_FLOATING_MEMORY = "PipeUsedFloatingMemory";
private static final String PIPE_TABLET_USED_MEMORY = "PipeTabletUsedMemory";
@@ -41,6 +41,8 @@
private static final String PIPE_TOTAL_MEMORY = "PipeTotalMemory";
+ private static final String PIPE_FLOATING_MEMORY = "PipeFloatingMemory";
+
//////////////////////////// bindTo & unbindFrom (metric framework) ////////////////////////////
@Override
@@ -74,22 +76,20 @@
o -> PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes(),
Tag.NAME.toString(),
PIPE_TOTAL_MEMORY);
- // resource reference count
metricService.createAutoGauge(
- Metric.PIPE_PINNED_MEMTABLE_COUNT.toString(),
+ Metric.PIPE_MEM.toString(),
MetricLevel.IMPORTANT,
- PipeDataNodeResourceManager.wal(),
- PipeWALResourceManager::getPinnedWalCount);
+ PipeDataNodeResourceManager.memory(),
+ o -> PipeMemoryManager.getTotalFloatingMemorySizeInBytes(),
+ Tag.NAME.toString(),
+ PIPE_FLOATING_MEMORY);
metricService.createAutoGauge(
- Metric.PIPE_LINKED_TSFILE_COUNT.toString(),
+ Metric.PIPE_MEM.toString(),
MetricLevel.IMPORTANT,
- PipeDataNodeResourceManager.tsfile(),
- PipeTsFileResourceManager::getLinkedTsfileCount);
- metricService.createAutoGauge(
- Metric.PIPE_LINKED_TSFILE_SIZE.toString(),
- MetricLevel.IMPORTANT,
- PipeDataNodeResourceManager.tsfile(),
- PipeTsFileResourceManager::getTotalLinkedTsfileSize);
+ PipeDataNodeResourceManager.memory(),
+ o -> PipeDataNodeAgent.task().getAllFloatingMemoryUsageInByte(),
+ Tag.NAME.toString(),
+ PIPE_USED_FLOATING_MEMORY);
// phantom reference count
metricService.createAutoGauge(
Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString(),
@@ -115,8 +115,17 @@
PIPE_TS_FILE_USED_MEMORY);
metricService.remove(
MetricType.AUTO_GAUGE, Metric.PIPE_MEM.toString(), Tag.NAME.toString(), PIPE_TOTAL_MEMORY);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PIPE_MEM.toString(),
+ Tag.NAME.toString(),
+ PIPE_FLOATING_MEMORY);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PIPE_MEM.toString(),
+ Tag.NAME.toString(),
+ PIPE_USED_FLOATING_MEMORY);
// resource reference count
- metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_PINNED_MEMTABLE_COUNT.toString());
metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_COUNT.toString());
metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_SIZE.toString());
// phantom reference count
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
index 04015fe..16108b3 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeTsFileToTabletsMetrics.java
@@ -104,8 +104,7 @@
public void register(final IoTDBDataRegionExtractor extractor) {
final String pipeID = extractor.getPipeName() + "_" + extractor.getCreationTime();
- pipe.add(pipeID);
- if (Objects.nonNull(metricService)) {
+ if (Objects.nonNull(metricService) && pipe.add(pipeID)) {
createMetrics(pipeID);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java
deleted file mode 100644
index b2e605b..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.metric.overview;
-
-import org.apache.iotdb.commons.service.metric.enums.Metric;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache;
-import org.apache.iotdb.metrics.AbstractMetricService;
-import org.apache.iotdb.metrics.metricsets.IMetricSet;
-import org.apache.iotdb.metrics.utils.MetricLevel;
-import org.apache.iotdb.metrics.utils.MetricType;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PipeWALInsertNodeCacheMetrics implements IMetricSet {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALInsertNodeCacheMetrics.class);
-
- //////////////////////////// bindTo & unbindFrom (metric framework) ////////////////////////////
-
- @Override
- public void bindTo(AbstractMetricService metricService) {
- metricService.createAutoGauge(
- Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString(),
- MetricLevel.IMPORTANT,
- WALInsertNodeCache.getInstance(),
- WALInsertNodeCache::getCacheHitRate);
- metricService.createAutoGauge(
- Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString(),
- MetricLevel.IMPORTANT,
- WALInsertNodeCache.getInstance(),
- WALInsertNodeCache::getCacheHitCount);
- metricService.createAutoGauge(
- Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString(),
- MetricLevel.IMPORTANT,
- WALInsertNodeCache.getInstance(),
- WALInsertNodeCache::getCacheRequestCount);
- }
-
- @Override
- public void unbindFrom(AbstractMetricService metricService) {
- metricService.remove(
- MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString());
- metricService.remove(
- MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString());
- metricService.remove(
- MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString());
- }
-
- //////////////////////////// singleton ////////////////////////////
-
- private static class PipeWALInsertNodeCacheMetricsHolder {
-
- private static final PipeWALInsertNodeCacheMetrics INSTANCE =
- new PipeWALInsertNodeCacheMetrics();
-
- private PipeWALInsertNodeCacheMetricsHolder() {
- // empty constructor
- }
- }
-
- public static PipeWALInsertNodeCacheMetrics getInstance() {
- return PipeWALInsertNodeCacheMetricsHolder.INSTANCE;
- }
-
- private PipeWALInsertNodeCacheMetrics() {
- // empty constructor
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java
deleted file mode 100644
index 3a35305..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/source/PipeAssignerMetrics.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.metric.source;
-
-import org.apache.iotdb.commons.service.metric.enums.Metric;
-import org.apache.iotdb.commons.service.metric.enums.Tag;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeDataRegionAssigner;
-import org.apache.iotdb.metrics.AbstractMetricService;
-import org.apache.iotdb.metrics.metricsets.IMetricSet;
-import org.apache.iotdb.metrics.utils.MetricLevel;
-import org.apache.iotdb.metrics.utils.MetricType;
-
-import com.google.common.collect.ImmutableSet;
-import org.checkerframework.checker.nullness.qual.NonNull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-public class PipeAssignerMetrics implements IMetricSet {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(PipeAssignerMetrics.class);
-
- private AbstractMetricService metricService;
-
- private final Map<String, PipeDataRegionAssigner> assignerMap = new HashMap<>();
-
- //////////////////////////// bindTo & unbindFrom (metric framework) ////////////////////////////
-
- @Override
- public void bindTo(AbstractMetricService metricService) {
- this.metricService = metricService;
- synchronized (this) {
- for (String dataRegionId : assignerMap.keySet()) {
- createMetrics(dataRegionId);
- }
- }
- }
-
- private void createMetrics(String dataRegionId) {
- createAutoGauge(dataRegionId);
- }
-
- private void createAutoGauge(String dataRegionId) {
- metricService.createAutoGauge(
- Metric.UNASSIGNED_HEARTBEAT_COUNT.toString(),
- MetricLevel.IMPORTANT,
- assignerMap.get(dataRegionId),
- PipeDataRegionAssigner::getPipeHeartbeatEventCount,
- Tag.REGION.toString(),
- dataRegionId);
- metricService.createAutoGauge(
- Metric.UNASSIGNED_TABLET_COUNT.toString(),
- MetricLevel.IMPORTANT,
- assignerMap.get(dataRegionId),
- PipeDataRegionAssigner::getTabletInsertionEventCount,
- Tag.REGION.toString(),
- dataRegionId);
- metricService.createAutoGauge(
- Metric.UNASSIGNED_TSFILE_COUNT.toString(),
- MetricLevel.IMPORTANT,
- assignerMap.get(dataRegionId),
- PipeDataRegionAssigner::getTsFileInsertionEventCount,
- Tag.REGION.toString(),
- dataRegionId);
- }
-
- @Override
- public void unbindFrom(AbstractMetricService metricService) {
- ImmutableSet<String> dataRegionIds = ImmutableSet.copyOf(assignerMap.keySet());
- for (String dataRegionId : dataRegionIds) {
- deregister(dataRegionId);
- }
- if (!assignerMap.isEmpty()) {
- LOGGER.warn("Failed to unbind from pipe assigner metrics, assigner map not empty");
- }
- }
-
- private void removeMetrics(String dataRegionId) {
- removeAutoGauge(dataRegionId);
- }
-
- private void removeAutoGauge(String dataRegionId) {
- metricService.remove(
- MetricType.AUTO_GAUGE,
- Metric.UNASSIGNED_HEARTBEAT_COUNT.toString(),
- Tag.REGION.toString(),
- dataRegionId);
- metricService.remove(
- MetricType.AUTO_GAUGE,
- Metric.UNASSIGNED_TABLET_COUNT.toString(),
- Tag.REGION.toString(),
- dataRegionId);
- metricService.remove(
- MetricType.AUTO_GAUGE,
- Metric.UNASSIGNED_TSFILE_COUNT.toString(),
- Tag.REGION.toString(),
- dataRegionId);
- }
-
- //////////////////////////// register & deregister (pipe integration) ////////////////////////////
-
- public void register(@NonNull PipeDataRegionAssigner pipeDataRegionAssigner) {
- String dataRegionId = pipeDataRegionAssigner.getDataRegionId();
- synchronized (this) {
- assignerMap.putIfAbsent(dataRegionId, pipeDataRegionAssigner);
- if (Objects.nonNull(metricService)) {
- createMetrics(dataRegionId);
- }
- }
- }
-
- public void deregister(String dataRegionId) {
- synchronized (this) {
- if (!assignerMap.containsKey(dataRegionId)) {
- LOGGER.warn(
- "Failed to deregister pipe assigner metrics, PipeDataRegionAssigner({}) does not exist",
- dataRegionId);
- return;
- }
- if (Objects.nonNull(metricService)) {
- removeMetrics(dataRegionId);
- }
- assignerMap.remove(dataRegionId);
- }
- }
-
- //////////////////////////// singleton ////////////////////////////
-
- private static class PipeAssignerMetricsHolder {
-
- private static final PipeAssignerMetrics INSTANCE = new PipeAssignerMetrics();
-
- private PipeAssignerMetricsHolder() {
- // empty constructor
- }
- }
-
- public static PipeAssignerMetrics getInstance() {
- return PipeAssignerMetrics.PipeAssignerMetricsHolder.INSTANCE;
- }
-
- private PipeAssignerMetrics() {
- // empty constructor
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
index cb1ba0b..0e1f0f2 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java
@@ -114,7 +114,6 @@
version = transferReq.version;
type = transferReq.type;
- body = transferReq.body;
return this;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java
index b20904a..752be40 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java
@@ -99,7 +99,6 @@
version = transferReq.version;
type = transferReq.type;
- body = transferReq.body;
return this;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
index 00a7e4c..5009cae 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
@@ -174,6 +174,14 @@
case HANDSHAKE_DATANODE_V1:
{
try {
+ if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
+ && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
+ < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
+ return new TPipeTransferResp(
+ RpcUtils.getStatus(
+ TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
+ "The receiver memory is not enough to handle the handshake request from datanode."));
+ }
return handleTransferHandshakeV1(
PipeTransferDataNodeHandshakeV1Req.fromTPipeTransferReq(req));
} finally {
@@ -184,6 +192,14 @@
case HANDSHAKE_DATANODE_V2:
{
try {
+ if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
+ && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
+ < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
+ return new TPipeTransferResp(
+ RpcUtils.getStatus(
+ TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
+ "The receiver memory is not enough to handle the handshake request from datanode."));
+ }
return handleTransferHandshakeV2(
PipeTransferDataNodeHandshakeV2Req.fromTPipeTransferReq(req));
} finally {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
index b58d934..3afe5eb 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner.java
@@ -21,9 +21,9 @@
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
+import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -52,7 +52,9 @@
LOGGER.info(
"Pipe hardlink dir found, deleting it: {}, result: {}",
pipeHardLinkDir,
- FileUtils.deleteQuietly(pipeHardLinkDir));
+ // For child directories, we need them to recover each pipe's progress
+ // Hence we do not delete them here, only delete the child files (older version)
+ FileUtils.deleteFileChildrenQuietly(pipeHardLinkDir));
}
}
}
@@ -65,7 +67,7 @@
+ PipeSnapshotResourceManager.PIPE_SNAPSHOT_DIR_NAME);
if (pipeConsensusDir.isDirectory()) {
LOGGER.info("Pipe snapshot dir found, deleting it: {},", pipeConsensusDir);
- org.apache.iotdb.commons.utils.FileUtils.deleteFileOrDirectory(pipeConsensusDir);
+ FileUtils.deleteFileOrDirectory(pipeConsensusDir);
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java
index 573106e..aaf9eff 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java
@@ -19,24 +19,19 @@
package org.apache.iotdb.db.pipe.resource;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.pipe.resource.log.PipeLogManager;
import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager;
import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager;
import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager;
import org.apache.iotdb.db.pipe.resource.ref.PipeDataNodePhantomReferenceManager;
import org.apache.iotdb.db.pipe.resource.snapshot.PipeDataNodeSnapshotResourceManager;
+import org.apache.iotdb.db.pipe.resource.tsfile.PipeCompactionManager;
import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.hardlink.PipeWALHardlinkResourceManager;
-import org.apache.iotdb.db.pipe.resource.wal.selfhost.PipeWALSelfHostResourceManager;
-
-import java.util.concurrent.atomic.AtomicReference;
public class PipeDataNodeResourceManager {
private final PipeTsFileResourceManager pipeTsFileResourceManager;
- private final AtomicReference<PipeWALResourceManager> pipeWALResourceManager;
+ private final PipeCompactionManager pipeCompactionManager;
private final PipeSnapshotResourceManager pipeSnapshotResourceManager;
private final PipeMemoryManager pipeMemoryManager;
private final PipeLogManager pipeLogManager;
@@ -46,18 +41,8 @@
return PipeResourceManagerHolder.INSTANCE.pipeTsFileResourceManager;
}
- public static PipeWALResourceManager wal() {
- if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) {
- synchronized (PipeResourceManagerHolder.INSTANCE) {
- if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) {
- PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.set(
- PipeConfig.getInstance().getPipeHardLinkWALEnabled()
- ? new PipeWALHardlinkResourceManager()
- : new PipeWALSelfHostResourceManager());
- }
- }
- }
- return PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get();
+ public static PipeCompactionManager compaction() {
+ return PipeResourceManagerHolder.INSTANCE.pipeCompactionManager;
}
public static PipeSnapshotResourceManager snapshot() {
@@ -80,7 +65,7 @@
private PipeDataNodeResourceManager() {
pipeTsFileResourceManager = new PipeTsFileResourceManager();
- pipeWALResourceManager = new AtomicReference<>();
+ pipeCompactionManager = new PipeCompactionManager();
pipeSnapshotResourceManager = new PipeDataNodeSnapshotResourceManager();
pipeMemoryManager = new PipeMemoryManager();
pipeLogManager = new PipeLogManager();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
index 690a7c5..8a87cf8 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java
@@ -54,9 +54,6 @@
private volatile long usedMemorySizeInBytesOfTsFiles;
- private static final double FLOATING_MEMORY_RATIO =
- PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion();
-
// Only non-zero memory blocks will be added to this set.
private final Set<PipeMemoryBlock> allocatedBlocks = new HashSet<>();
@@ -104,18 +101,6 @@
* getTotalNonFloatingMemorySizeInBytes();
}
- public long getAllocatedMemorySizeInBytesOfWAL() {
- return (long)
- (PipeConfig.getInstance().getPipeDataStructureWalMemoryProportion()
- * getTotalNonFloatingMemorySizeInBytes());
- }
-
- public long getAllocatedMemorySizeInBytesOfBatch() {
- return (long)
- (PipeConfig.getInstance().getPipeDataStructureBatchMemoryProportion()
- * getTotalNonFloatingMemorySizeInBytes());
- }
-
public boolean isEnough4TabletParsing() {
return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles
< EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTabletsAndTsFiles()
@@ -648,15 +633,29 @@
return usedMemorySizeInBytesOfTsFiles;
}
+ public long getAllocatedMemorySizeInBytesOfBatch() {
+ return (long)
+ (PipeConfig.getInstance().getPipeDataStructureBatchMemoryProportion()
+ * getTotalNonFloatingMemorySizeInBytes());
+ }
+
public long getFreeMemorySizeInBytes() {
return TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes;
}
public static long getTotalNonFloatingMemorySizeInBytes() {
- return (long) (TOTAL_MEMORY_SIZE_IN_BYTES * (1 - FLOATING_MEMORY_RATIO));
+ return (long)
+ (TOTAL_MEMORY_SIZE_IN_BYTES
+ * (1 - PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion()));
}
public static long getTotalFloatingMemorySizeInBytes() {
- return (long) (TOTAL_MEMORY_SIZE_IN_BYTES * FLOATING_MEMORY_RATIO);
+ return (long)
+ (TOTAL_MEMORY_SIZE_IN_BYTES
+ * PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion());
+ }
+
+ public static long getTotalMemorySizeInBytes() {
+ return TOTAL_MEMORY_SIZE_IN_BYTES;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
index cd9a0a7..d82c261 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java
@@ -247,8 +247,8 @@
return totalSizeInBytes;
}
- public static long calculateBatchDataRamBytesUsed(BatchData batchData) {
- long totalSizeInBytes = 0;
+ public static int calculateBatchDataRamBytesUsed(BatchData batchData) {
+ int totalSizeInBytes = 0;
// timestamp
totalSizeInBytes += 8;
@@ -263,16 +263,16 @@
continue;
}
// consider variable references (plus 8) and memory alignment (round up to 8)
- totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8L, 8);
+ totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8, 8);
}
} else {
if (type.isBinary()) {
final Binary binary = batchData.getBinary();
// refer to org.apache.tsfile.utils.TsPrimitiveType.TsBinary.getSize
totalSizeInBytes +=
- roundUpToMultiple((binary == null ? 8 : binary.ramBytesUsed()) + 8L, 8);
+ roundUpToMultiple((binary == null ? 8 : binary.getLength() + 8) + 8, 8);
} else {
- totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8L, 8);
+ totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8, 8);
}
}
}
@@ -287,7 +287,7 @@
* @param n The specified multiple.
* @return The nearest multiple of n greater than or equal to num.
*/
- private static long roundUpToMultiple(long num, int n) {
+ private static int roundUpToMultiple(int num, int n) {
if (n == 0) {
throw new IllegalArgumentException("The multiple n must be greater than 0");
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java
new file mode 100644
index 0000000..29d880c
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.resource.tsfile;
+
+import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue;
+import org.apache.iotdb.db.pipe.agent.task.subtask.connector.PipeConnectorSubtaskLifeCycle;
+import org.apache.iotdb.db.pipe.agent.task.subtask.connector.PipeRealtimePriorityBlockingQueue;
+import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
+import org.apache.iotdb.pipe.api.event.Event;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+/**
+ * Bridges compaction results to pipe connector subtasks: when a compaction finishes,
+ * the source TsFiles referenced by realtime-priority pending queues are swapped for
+ * the compaction target files, so pipes transfer the compacted files instead.
+ */
+public class PipeCompactionManager {
+
+ // Life cycles of all registered connector subtasks; iterated on every compaction
+ // result. CopyOnWriteArraySet keeps iteration safe against concurrent
+ // (de)registration without external locking.
+ private final Set<PipeConnectorSubtaskLifeCycle> pipeConnectorSubtaskLifeCycles =
+ new CopyOnWriteArraySet<>();
+
+ public void registerPipeConnectorSubtaskLifeCycle(
+ final PipeConnectorSubtaskLifeCycle pipeConnectorSubtaskLifeCycle) {
+ pipeConnectorSubtaskLifeCycles.add(pipeConnectorSubtaskLifeCycle);
+ }
+
+ public void deregisterPipeConnectorSubtaskLifeCycle(
+ final PipeConnectorSubtaskLifeCycle pipeConnectorSubtaskLifeCycle) {
+ pipeConnectorSubtaskLifeCycles.remove(pipeConnectorSubtaskLifeCycle);
+ }
+
+ /**
+ * Notifies all registered subtasks of a finished compaction, replacing the
+ * source (seq + unseq) file resources with the target file resources in each
+ * realtime-priority pending queue of the given data region.
+ *
+ * <p>NOTE(review): {@code storageGroupName} and {@code timePartition} are
+ * currently unused here — kept presumably for future filtering; confirm.
+ */
+ public void emitResult(
+ final String storageGroupName,
+ final String dataRegionId,
+ final long timePartition,
+ final List<TsFileResource> seqFileResources,
+ final List<TsFileResource> unseqFileResources,
+ final List<TsFileResource> targetFileResources) {
+ final Set<TsFileResource> sourceFileResources = new HashSet<>(seqFileResources);
+ sourceFileResources.addAll(unseqFileResources);
+
+ for (final PipeConnectorSubtaskLifeCycle lifeCycle : pipeConnectorSubtaskLifeCycles) {
+ final UnboundedBlockingPendingQueue<Event> pendingQueue = lifeCycle.getPendingQueue();
+ // TODO: support non realtime priority blocking queue
+ if (pendingQueue instanceof PipeRealtimePriorityBlockingQueue) {
+ final PipeRealtimePriorityBlockingQueue realtimePriorityBlockingQueue =
+ (PipeRealtimePriorityBlockingQueue) pendingQueue;
+ realtimePriorityBlockingQueue.replace(
+ dataRegionId, sourceFileResources, targetFileResources);
+ }
+ }
+ }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileMemResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileMemResource.java
new file mode 100644
index 0000000..3673afb
--- /dev/null
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileMemResource.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.pipe.resource.tsfile;
+
+import org.apache.iotdb.commons.pipe.config.PipeConfig;
+import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
+import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.read.TsFileDeviceIterator;
+import org.apache.tsfile.read.TsFileSequenceReader;
+import org.apache.tsfile.utils.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * A {@link PipeTsFileResource} variant that is not bound to a hardlink/copied file
+ * (constructed with a null file) and instead holds the lazily-cached TsFile metadata
+ * shared by all pipes: device -> measurements, device -> isAligned, and
+ * full path -> data type maps, together with the pipe memory block accounting for them.
+ */
+public class PipeTsFileMemResource extends PipeTsFileResource {
+ private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileMemResource.class);
+ // Pipe memory usage ratio above which no cache allocation is attempted.
+ public static final float MEMORY_SUFFICIENT_THRESHOLD = 0.7f;
+ // Accounts for the memory of the cached maps below; null when nothing is cached.
+ private PipeMemoryBlock allocatedMemoryBlock;
+ // Lazily populated by cacheObjectsIfAbsent / cacheDeviceIsAlignedMapIfAbsent.
+ private Map<IDeviceID, List<String>> deviceMeasurementsMap = null;
+ private Map<IDeviceID, Boolean> deviceIsAlignedMap = null;
+ private Map<String, TSDataType> measurementDataTypeMap = null;
+
+ public PipeTsFileMemResource() {
+ // No file is bound: this resource only tracks the shared metadata cache.
+ super(null);
+ }
+
+ @Override
+ public void close() {
+ // Intentionally does NOT call super.close(): the parent would dereference the
+ // null file. Only drops the cached maps and releases the pipe memory block.
+ if (deviceMeasurementsMap != null) {
+ deviceMeasurementsMap = null;
+ }
+
+ if (deviceIsAlignedMap != null) {
+ deviceIsAlignedMap = null;
+ }
+
+ if (measurementDataTypeMap != null) {
+ measurementDataTypeMap = null;
+ }
+
+ if (allocatedMemoryBlock != null) {
+ allocatedMemoryBlock.close();
+ allocatedMemoryBlock = null;
+ }
+ }
+
+ //////////////////////////// Cache Getter ////////////////////////////
+
+ // Returns the cached map, loading it (and the other metadata maps) on first use.
+ // May return null if memory was insufficient to build the cache.
+ public synchronized Map<IDeviceID, List<String>> tryGetDeviceMeasurementsMap(final File tsFile)
+ throws IOException {
+ if (deviceMeasurementsMap == null) {
+ cacheObjectsIfAbsent(tsFile);
+ }
+ return deviceMeasurementsMap;
+ }
+
+ // cacheOtherMetadata selects between caching all metadata maps or only the
+ // (cheaper) deviceIsAligned map.
+ public synchronized Map<IDeviceID, Boolean> tryGetDeviceIsAlignedMap(
+ final boolean cacheOtherMetadata, final File tsFile) throws IOException {
+ if (deviceIsAlignedMap == null) {
+ if (cacheOtherMetadata) {
+ cacheObjectsIfAbsent(tsFile);
+ } else {
+ cacheDeviceIsAlignedMapIfAbsent(tsFile);
+ }
+ }
+ return deviceIsAlignedMap;
+ }
+
+ public synchronized Map<String, TSDataType> tryGetMeasurementDataTypeMap(final File tsFile)
+ throws IOException {
+ if (measurementDataTypeMap == null) {
+ cacheObjectsIfAbsent(tsFile);
+ }
+ return measurementDataTypeMap;
+ }
+
+ // Builds only the deviceIsAligned cache. Returns true if cached (now or before),
+ // false if pipe memory pressure prevented caching.
+ synchronized boolean cacheDeviceIsAlignedMapIfAbsent(final File tsFile) throws IOException {
+
+ if (allocatedMemoryBlock != null) {
+ // This means objects are already cached.
+ return true;
+ }
+
+ // See if pipe memory is sufficient to be allocated for TsFileSequenceReader.
+ // Only allocate when pipe memory used is below MEMORY_SUFFICIENT_THRESHOLD (70%),
+ // because memory here is hard to shrink and may consume too much memory.
+ allocatedMemoryBlock =
+ PipeDataNodeResourceManager.memory()
+ .forceAllocateIfSufficient(
+ PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(),
+ MEMORY_SUFFICIENT_THRESHOLD);
+ if (allocatedMemoryBlock == null) {
+ LOGGER.info(
+ "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high",
+ tsFile.getPath());
+ return false;
+ }
+
+ long memoryRequiredInBytes = 0L;
+ try (TsFileSequenceReader sequenceReader =
+ new TsFileSequenceReader(tsFile.getPath(), true, false)) {
+ deviceIsAlignedMap = new HashMap<>();
+ final TsFileDeviceIterator deviceIsAlignedIterator =
+ sequenceReader.getAllDevicesIteratorWithIsAligned();
+ while (deviceIsAlignedIterator.hasNext()) {
+ final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next();
+ deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight());
+ }
+ memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap);
+ }
+ // Release memory of TsFileSequenceReader.
+ allocatedMemoryBlock.close();
+ allocatedMemoryBlock = null;
+
+ // Allocate again for the cached objects.
+ allocatedMemoryBlock =
+ PipeDataNodeResourceManager.memory()
+ .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
+ if (allocatedMemoryBlock == null) {
+ LOGGER.info(
+ "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
+ tsFile.getPath());
+ deviceIsAlignedMap = null;
+ return false;
+ }
+
+ LOGGER.info("PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", tsFile.getPath());
+ return true;
+ }
+
+ // Builds all three metadata caches. Returns true if fully cached, false if
+ // memory pressure prevented caching (partial state is rolled back to null).
+ synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException {
+ if (allocatedMemoryBlock != null) {
+ if (deviceMeasurementsMap != null) {
+ return true;
+ } else {
+ // Recalculate it again because only deviceIsAligned map is cached
+ allocatedMemoryBlock.close();
+ allocatedMemoryBlock = null;
+ }
+ }
+
+ // See if pipe memory is sufficient to be allocated for TsFileSequenceReader.
+ // Only allocate when pipe memory used is below MEMORY_SUFFICIENT_THRESHOLD (70%),
+ // because memory here is hard to shrink and may consume too much memory.
+ allocatedMemoryBlock =
+ PipeDataNodeResourceManager.memory()
+ .forceAllocateIfSufficient(
+ PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(),
+ MEMORY_SUFFICIENT_THRESHOLD);
+ if (allocatedMemoryBlock == null) {
+ LOGGER.info(
+ "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high",
+ tsFile.getPath());
+ return false;
+ }
+
+ long memoryRequiredInBytes = 0L;
+ try (TsFileSequenceReader sequenceReader =
+ new TsFileSequenceReader(tsFile.getPath(), true, true)) {
+ deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap();
+ memoryRequiredInBytes +=
+ PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap);
+
+ if (Objects.isNull(deviceIsAlignedMap)) {
+ deviceIsAlignedMap = new HashMap<>();
+ final TsFileDeviceIterator deviceIsAlignedIterator =
+ sequenceReader.getAllDevicesIteratorWithIsAligned();
+ while (deviceIsAlignedIterator.hasNext()) {
+ final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next();
+ deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight());
+ }
+ }
+ memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap);
+
+ measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap();
+ memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap);
+ }
+ // Release memory of TsFileSequenceReader.
+ allocatedMemoryBlock.close();
+ allocatedMemoryBlock = null;
+
+ // Allocate again for the cached objects.
+ allocatedMemoryBlock =
+ PipeDataNodeResourceManager.memory()
+ .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
+ if (allocatedMemoryBlock == null) {
+ LOGGER.info(
+ "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
+ tsFile.getPath());
+ deviceIsAlignedMap = null;
+ deviceMeasurementsMap = null;
+ measurementDataTypeMap = null;
+ return false;
+ }
+
+ LOGGER.info("PipeTsFileResource: Cached objects for tsfile {}.", tsFile.getPath());
+ return true;
+ }
+}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
index bf789b9..5065821 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java
@@ -19,73 +19,33 @@
package org.apache.iotdb.db.pipe.resource.tsfile;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
-import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock;
-import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil;
-import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
-import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus;
-
-import org.apache.tsfile.enums.TSDataType;
-import org.apache.tsfile.file.metadata.IDeviceID;
-import org.apache.tsfile.read.TsFileDeviceIterator;
-import org.apache.tsfile.read.TsFileSequenceReader;
-import org.apache.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
-import java.io.IOException;
import java.nio.file.Files;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
public class PipeTsFileResource implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResource.class);
- public static final long TSFILE_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20;
- public static final float MEMORY_SUFFICIENT_THRESHOLD = 0.7f;
-
private final File hardlinkOrCopiedFile;
- private final boolean isTsFile;
-
- /** this TsFileResource is used to track the {@link TsFileResourceStatus} of original TsFile. * */
- private final TsFileResource tsFileResource;
private volatile long fileSize = -1L;
private final AtomicInteger referenceCount;
- private final AtomicLong lastUnpinToZeroTime;
- private PipeMemoryBlock allocatedMemoryBlock;
- private Map<IDeviceID, List<String>> deviceMeasurementsMap = null;
- private Map<IDeviceID, Boolean> deviceIsAlignedMap = null;
- private Map<String, TSDataType> measurementDataTypeMap = null;
- public PipeTsFileResource(
- final File hardlinkOrCopiedFile,
- final boolean isTsFile,
- final TsFileResource tsFileResource) {
+ public PipeTsFileResource(final File hardlinkOrCopiedFile) {
this.hardlinkOrCopiedFile = hardlinkOrCopiedFile;
- this.isTsFile = isTsFile;
- this.tsFileResource = tsFileResource;
referenceCount = new AtomicInteger(1);
- lastUnpinToZeroTime = new AtomicLong(Long.MAX_VALUE);
}
public File getFile() {
return hardlinkOrCopiedFile;
}
- public boolean isOriginalTsFileDeleted() {
- return isTsFile && Objects.nonNull(tsFileResource) && tsFileResource.isDeleted();
- }
-
public long getFileSize() {
if (fileSize == -1L) {
synchronized (this) {
@@ -97,62 +57,30 @@
return fileSize;
}
- public long getTsFileResourceSize() {
- return Objects.nonNull(tsFileResource) ? tsFileResource.calculateRamSize() : 0;
- }
-
///////////////////// Reference Count /////////////////////
public int getReferenceCount() {
return referenceCount.get();
}
- public int increaseAndGetReference() {
- return referenceCount.addAndGet(1);
+ public void increaseReferenceCount() {
+ referenceCount.addAndGet(1);
}
- public int decreaseAndGetReference() {
+ public boolean decreaseReferenceCount() {
final int finalReferenceCount = referenceCount.addAndGet(-1);
if (finalReferenceCount == 0) {
- lastUnpinToZeroTime.set(System.currentTimeMillis());
+ close();
+ return true;
}
if (finalReferenceCount < 0) {
LOGGER.warn("PipeTsFileResource's reference count is decreased to below 0.");
}
- return finalReferenceCount;
- }
-
- public synchronized boolean closeIfOutOfTimeToLive() {
- if (referenceCount.get() <= 0
- && (deviceMeasurementsMap == null // Not cached yet.
- || System.currentTimeMillis() - lastUnpinToZeroTime.get()
- > TSFILE_MIN_TIME_TO_LIVE_IN_MS)) {
- close();
- return true;
- } else {
- return false;
- }
+ return false;
}
@Override
public synchronized void close() {
- if (deviceMeasurementsMap != null) {
- deviceMeasurementsMap = null;
- }
-
- if (deviceIsAlignedMap != null) {
- deviceIsAlignedMap = null;
- }
-
- if (measurementDataTypeMap != null) {
- measurementDataTypeMap = null;
- }
-
- if (allocatedMemoryBlock != null) {
- allocatedMemoryBlock.close();
- allocatedMemoryBlock = null;
- }
-
try {
Files.deleteIfExists(hardlinkOrCopiedFile.toPath());
} catch (final Exception e) {
@@ -165,166 +93,4 @@
LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile);
}
-
- //////////////////////////// Cache Getter ////////////////////////////
-
- public synchronized Map<IDeviceID, List<String>> tryGetDeviceMeasurementsMap()
- throws IOException {
- if (deviceMeasurementsMap == null && isTsFile) {
- cacheObjectsIfAbsent();
- }
- return deviceMeasurementsMap;
- }
-
- public synchronized Map<IDeviceID, Boolean> tryGetDeviceIsAlignedMap(
- final boolean cacheOtherMetadata) throws IOException {
- if (deviceIsAlignedMap == null && isTsFile) {
- if (cacheOtherMetadata) {
- cacheObjectsIfAbsent();
- } else {
- cacheDeviceIsAlignedMapIfAbsent();
- }
- }
- return deviceIsAlignedMap;
- }
-
- public synchronized Map<String, TSDataType> tryGetMeasurementDataTypeMap() throws IOException {
- if (measurementDataTypeMap == null && isTsFile) {
- cacheObjectsIfAbsent();
- }
- return measurementDataTypeMap;
- }
-
- synchronized boolean cacheDeviceIsAlignedMapIfAbsent() throws IOException {
- if (!isTsFile) {
- return false;
- }
-
- if (allocatedMemoryBlock != null) {
- // This means objects are already cached.
- return true;
- }
-
- // See if pipe memory is sufficient to be allocated for TsFileSequenceReader.
- // Only allocate when pipe memory used is less than 50%, because memory here
- // is hard to shrink and may consume too much memory.
- allocatedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateIfSufficient(
- PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(),
- MEMORY_SUFFICIENT_THRESHOLD);
- if (allocatedMemoryBlock == null) {
- LOGGER.info(
- "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high",
- hardlinkOrCopiedFile.getPath());
- return false;
- }
-
- long memoryRequiredInBytes = 0L;
- try (TsFileSequenceReader sequenceReader =
- new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, false)) {
- deviceIsAlignedMap = new HashMap<>();
- final TsFileDeviceIterator deviceIsAlignedIterator =
- sequenceReader.getAllDevicesIteratorWithIsAligned();
- while (deviceIsAlignedIterator.hasNext()) {
- final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next();
- deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight());
- }
- memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap);
- }
- // Release memory of TsFileSequenceReader.
- allocatedMemoryBlock.close();
- allocatedMemoryBlock = null;
-
- // Allocate again for the cached objects.
- allocatedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
- if (allocatedMemoryBlock == null) {
- LOGGER.info(
- "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
- hardlinkOrCopiedFile.getPath());
- deviceIsAlignedMap = null;
- return false;
- }
-
- LOGGER.info(
- "PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.",
- hardlinkOrCopiedFile.getPath());
- return true;
- }
-
- synchronized boolean cacheObjectsIfAbsent() throws IOException {
- if (!isTsFile) {
- return false;
- }
-
- if (allocatedMemoryBlock != null) {
- if (deviceMeasurementsMap != null) {
- return true;
- } else {
- // Recalculate it again because only deviceIsAligned map is cached
- allocatedMemoryBlock.close();
- allocatedMemoryBlock = null;
- }
- }
-
- // See if pipe memory is sufficient to be allocated for TsFileSequenceReader.
- // Only allocate when pipe memory used is less than 50%, because memory here
- // is hard to shrink and may consume too much memory.
- allocatedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateIfSufficient(
- PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(),
- MEMORY_SUFFICIENT_THRESHOLD);
- if (allocatedMemoryBlock == null) {
- LOGGER.info(
- "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high",
- hardlinkOrCopiedFile.getPath());
- return false;
- }
-
- long memoryRequiredInBytes = 0L;
- try (TsFileSequenceReader sequenceReader =
- new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, true)) {
- deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap();
- memoryRequiredInBytes +=
- PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap);
-
- if (Objects.isNull(deviceIsAlignedMap)) {
- deviceIsAlignedMap = new HashMap<>();
- final TsFileDeviceIterator deviceIsAlignedIterator =
- sequenceReader.getAllDevicesIteratorWithIsAligned();
- while (deviceIsAlignedIterator.hasNext()) {
- final Pair<IDeviceID, Boolean> deviceIsAlignedPair = deviceIsAlignedIterator.next();
- deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight());
- }
- }
- memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap);
-
- measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap();
- memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap);
- }
- // Release memory of TsFileSequenceReader.
- allocatedMemoryBlock.close();
- allocatedMemoryBlock = null;
-
- // Allocate again for the cached objects.
- allocatedMemoryBlock =
- PipeDataNodeResourceManager.memory()
- .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD);
- if (allocatedMemoryBlock == null) {
- LOGGER.info(
- "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high",
- hardlinkOrCopiedFile.getPath());
- deviceIsAlignedMap = null;
- deviceMeasurementsMap = null;
- measurementDataTypeMap = null;
- return false;
- }
-
- LOGGER.info(
- "PipeTsFileResource: Cached objects for tsfile {}.", hardlinkOrCopiedFile.getPath());
- return true;
- }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
index d00318a..37b3014 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java
@@ -22,8 +22,7 @@
import org.apache.iotdb.commons.conf.IoTDBConstant;
import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.utils.FileUtils;
-import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
-import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
+import org.apache.iotdb.commons.utils.TestOnly;
import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
@@ -32,89 +31,31 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
import java.io.File;
import java.io.IOException;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
+import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
public class PipeTsFileResourceManager {
private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResourceManager.class);
- private final Map<String, PipeTsFileResource> hardlinkOrCopiedFileToPipeTsFileResourceMap =
+ // This is used to hold the assigner pinned tsFiles.
+ // Also, it is used to provide metadata cache of the tsFile, and is shared by all the pipe's
+ // tsFiles.
+ private final Map<String, PipeTsFileMemResource> hardlinkOrCopiedFileToTsFileMemResourceMap =
new ConcurrentHashMap<>();
+
+ // PipeName -> TsFilePath -> PipeTsFileResource
+ private final Map<String, Map<String, PipeTsFileResource>>
+ hardlinkOrCopiedFileToPipeTsFileResourceMap = new ConcurrentHashMap<>();
private final PipeTsFileResourceSegmentLock segmentLock = new PipeTsFileResourceSegmentLock();
- public PipeTsFileResourceManager() {
- PipeDataNodeAgent.runtime()
- .registerPeriodicalJob(
- "PipeTsFileResourceManager#ttlCheck()",
- this::tryTtlCheck,
- Math.max(PipeTsFileResource.TSFILE_MIN_TIME_TO_LIVE_IN_MS / 1000, 1));
- }
-
- private void tryTtlCheck() {
- try {
- ttlCheck();
- } catch (final InterruptedException e) {
- Thread.currentThread().interrupt();
- LOGGER.warn("failed to try lock when checking TTL because of interruption", e);
- } catch (final Exception e) {
- LOGGER.warn("failed to check TTL of PipeTsFileResource: ", e);
- }
- }
-
- private void ttlCheck() throws InterruptedException {
- final Iterator<Map.Entry<String, PipeTsFileResource>> iterator =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.entrySet().iterator();
- final long timeout =
- PipeConfig.getInstance().getPipeSubtaskExecutorCronHeartbeatEventIntervalSeconds() >> 1;
- final Optional<Logger> logger =
- PipeDataNodeResourceManager.log()
- .schedule(
- PipeTsFileResourceManager.class,
- PipeConfig.getInstance().getPipeTsFilePinMaxLogNumPerRound(),
- PipeConfig.getInstance().getPipeTsFilePinMaxLogIntervalRounds(),
- hardlinkOrCopiedFileToPipeTsFileResourceMap.size());
- final StringBuilder logBuilder = new StringBuilder();
- while (iterator.hasNext()) {
- final Map.Entry<String, PipeTsFileResource> entry = iterator.next();
-
- final String hardlinkOrCopiedFile = entry.getKey();
- if (!segmentLock.tryLock(new File(hardlinkOrCopiedFile), timeout, TimeUnit.SECONDS)) {
- LOGGER.warn(
- "failed to try lock when checking TTL for file {} because of timeout ({}s)",
- hardlinkOrCopiedFile,
- timeout);
- continue;
- }
-
- try {
- if (entry.getValue().closeIfOutOfTimeToLive()) {
- iterator.remove();
- } else {
- logBuilder.append(
- String.format(
- "<%s , %d times, %d bytes> ",
- entry.getKey(),
- entry.getValue().getReferenceCount(),
- entry.getValue().getFileSize()));
- }
- } catch (final Exception e) {
- LOGGER.warn("failed to close PipeTsFileResource when checking TTL: ", e);
- } finally {
- segmentLock.unlock(new File(hardlinkOrCopiedFile));
- }
- }
- if (logBuilder.length() > 0) {
- logger.ifPresent(l -> l.info("Pipe file {}are still referenced", logBuilder));
- }
- }
-
/**
* Given a file, create a hardlink or copy it to pipe dir, maintain a reference count for the
* hardlink or copied file, and return the hardlink or copied file.
@@ -131,19 +72,16 @@
* @param file tsfile, resource file or mod file. can be original file or hardlink/copy of
* original file
* @param isTsFile {@code true} to create hardlink, {@code false} to copy file
- * @param tsFileResource the TsFileResource of original TsFile. Ignored if {@param isTsFile} is
- * {@code false}.
* @return the hardlink or copied file
* @throws IOException when create hardlink or copy file failed
*/
public File increaseFileReference(
- final File file, final boolean isTsFile, final TsFileResource tsFileResource)
- throws IOException {
+ final File file, final boolean isTsFile, final @Nonnull String pipeName) throws IOException {
// If the file is already a hardlink or copied file,
// just increase reference count and return it
segmentLock.lock(file);
try {
- if (increaseReferenceIfExists(file)) {
+ if (increaseReferenceIfExists(file, isTsFile, pipeName)) {
return file;
}
} finally {
@@ -152,11 +90,12 @@
// If the file is not a hardlink or copied file, check if there is a related hardlink or
// copied file in pipe dir. if so, increase reference count and return it
- final File hardlinkOrCopiedFile = getHardlinkOrCopiedFileInPipeDir(file);
+ final File hardlinkOrCopiedFile = getHardlinkOrCopiedFileInPipeDir(file, pipeName);
segmentLock.lock(hardlinkOrCopiedFile);
try {
- if (increaseReferenceIfExists(hardlinkOrCopiedFile)) {
+ if (increaseReferenceIfExists(hardlinkOrCopiedFile, isTsFile, pipeName)) {
return hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
.get(hardlinkOrCopiedFile.getPath())
.getFile();
}
@@ -171,27 +110,55 @@
// If the file is not a hardlink or copied file, and there is no related hardlink or copied
// file in pipe dir, create a hardlink or copy it to pipe dir, maintain a reference count for
// the hardlink or copied file, and return the hardlink or copied file.
- hardlinkOrCopiedFileToPipeTsFileResourceMap.put(
- resultFile.getPath(), new PipeTsFileResource(resultFile, isTsFile, tsFileResource));
+ hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .put(resultFile.getPath(), new PipeTsFileResource(resultFile));
+
+ increaseMemReference(resultFile, isTsFile);
+
return resultFile;
} finally {
segmentLock.unlock(hardlinkOrCopiedFile);
}
}
- private boolean increaseReferenceIfExists(final File file) {
+ // Increases the per-pipe reference count for an already-tracked file (and, for
+ // TsFiles, the shared mem-resource reference). Returns false when the file is
+ // not yet tracked for this pipe so the caller creates the hardlink/copy.
+ private boolean increaseReferenceIfExists(
+ final File file, final boolean isTsFile, final @Nonnull String pipeName) {
+ final String path = file.getPath();
final PipeTsFileResource resource =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.get(file.getPath());
+ hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .get(path);
if (resource != null) {
- resource.increaseAndGetReference();
+ resource.increaseReferenceCount();
+ increaseMemReference(file, isTsFile);
return true;
}
return false;
}
- public static File getHardlinkOrCopiedFileInPipeDir(final File file) throws IOException {
+ // Increases the shared mem-resource reference for a TsFile, creating the entry
+ // (with an initial reference of 1, set by the PipeTsFileResource constructor)
+ // when absent. No-op for non-TsFiles (resource/mod files).
+ private void increaseMemReference(final File file, final boolean isTsFile) {
+ if (!isTsFile) {
+ return;
+ }
+ // Increase the assigner's file to avoid hard-link or memory cache cleaning
+ // Note that it does not exist for historical files
+ hardlinkOrCopiedFileToTsFileMemResourceMap.compute(
+ getCommonFilePath(file),
+ (k, v) -> {
+ if (Objects.isNull(v)) {
+ return new PipeTsFileMemResource();
+ } else {
+ v.increaseReferenceCount();
+ return v;
+ }
+ });
+ }
+
+ public static File getHardlinkOrCopiedFileInPipeDir(final File file, final String pipeName)
+ throws IOException {
try {
- return new File(getPipeTsFileDirPath(file), getRelativeFilePath(file));
+ return new File(getPipeTsFileDirPath(file, pipeName), getRelativeFilePath(file));
} catch (final Exception e) {
throw new IOException(
String.format(
@@ -202,22 +169,27 @@
}
}
- private static String getPipeTsFileDirPath(File file) throws IOException {
+ // Resolves the pipe hardlink dir for a file by walking up to the sequence/
+ // unsequence (or pipe hardlink base) folder. A null pipeName yields the shared
+ // (assigner) dir without a per-pipe suffix — presumably the assigner path;
+ // TODO confirm against callers.
+ private static String getPipeTsFileDirPath(File file, final String pipeName) throws IOException {
while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME)
- && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) {
+ && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)
+ && !file.getName().equals(PipeConfig.getInstance().getPipeHardlinkBaseDirName())) {
file = file.getParentFile();
}
return file.getParentFile().getCanonicalPath()
+ File.separator
+ PipeConfig.getInstance().getPipeHardlinkBaseDirName()
+ File.separator
- + PipeConfig.getInstance().getPipeHardlinkTsFileDirName();
+ + PipeConfig.getInstance().getPipeHardlinkTsFileDirName()
+ + (Objects.nonNull(pipeName) ? File.separator + pipeName : "");
}
private static String getRelativeFilePath(File file) {
StringBuilder builder = new StringBuilder(file.getName());
while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME)
- && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) {
+ && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)
+ && !file.getParentFile()
+ .getName()
+ .equals(PipeConfig.getInstance().getPipeHardlinkTsFileDirName())) {
file = file.getParentFile();
builder =
new StringBuilder(file.getName())
@@ -231,32 +203,70 @@
* Given a hardlink or copied file, decrease its reference count, if the reference count is 0,
* delete the file. if the given file is not a hardlink or copied file, do nothing.
*
- * @param hardlinkOrCopiedFile the copied or hardlinked file
+ * @param hardlinkOrCopiedFile the copied or hard-linked file
*/
- public void decreaseFileReference(final File hardlinkOrCopiedFile) {
+ public void decreaseFileReference(
+ final File hardlinkOrCopiedFile, final @Nonnull String pipeName) {
segmentLock.lock(hardlinkOrCopiedFile);
try {
final String filePath = hardlinkOrCopiedFile.getPath();
- final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath);
- if (resource != null) {
- resource.decreaseAndGetReference();
+ final PipeTsFileResource resource =
+ hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .get(filePath);
+ if (resource != null && resource.decreaseReferenceCount()) {
+ hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .remove(filePath);
}
+ // Decrease the assigner's file to clear hard-link and memory cache
+ // Note that it does not exist for historical files
+ decreaseMemReferenceIfExists(hardlinkOrCopiedFile);
} finally {
segmentLock.unlock(hardlinkOrCopiedFile);
}
}
+ private void decreaseMemReferenceIfExists(final File file) {
+ // Decrease the assigner's file reference to allow hard-link and memory cache cleaning
+ // Note that it does not exist for historical files
+ final String commonFilePath = getCommonFilePath(file);
+ if (hardlinkOrCopiedFileToTsFileMemResourceMap.containsKey(commonFilePath)
+ && hardlinkOrCopiedFileToTsFileMemResourceMap
+ .get(commonFilePath)
+ .decreaseReferenceCount()) {
+ hardlinkOrCopiedFileToPipeTsFileResourceMap.remove(commonFilePath);
+ }
+ }
+
+ // Warning: Shall not be called by the assigner
+ private String getCommonFilePath(final @Nonnull File file) {
+ // If the parent or grandparent is null then this is a testing scenario
+ // Skip the "pipeName" of this file
+ return Objects.isNull(file.getParentFile())
+ || Objects.isNull(file.getParentFile().getParentFile())
+ ? file.getPath()
+ : file.getParentFile().getParent() + File.separator + file.getName();
+ }
+
/**
* Get the reference count of the file.
*
* @param hardlinkOrCopiedFile the copied or hardlinked file
* @return the reference count of the file
*/
- public int getFileReferenceCount(final File hardlinkOrCopiedFile) {
+ @TestOnly
+ public int getFileReferenceCount(
+ final File hardlinkOrCopiedFile, final @Nullable String pipeName) {
segmentLock.lock(hardlinkOrCopiedFile);
try {
- final String filePath = hardlinkOrCopiedFile.getPath();
- final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath);
+ final PipeTsFileResource resource =
+ Objects.nonNull(pipeName)
+ ? hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .get(hardlinkOrCopiedFile.getPath())
+ : hardlinkOrCopiedFileToTsFileMemResourceMap.get(
+ getCommonFilePath(hardlinkOrCopiedFile));
return resource != null ? resource.getReferenceCount() : 0;
} finally {
segmentLock.unlock(hardlinkOrCopiedFile);
@@ -272,9 +282,13 @@
public boolean cacheObjectsIfAbsent(final File hardlinkOrCopiedTsFile) throws IOException {
segmentLock.lock(hardlinkOrCopiedTsFile);
try {
- final PipeTsFileResource resource =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath());
- return resource != null && resource.cacheObjectsIfAbsent();
+ if (hardlinkOrCopiedTsFile.getParentFile() == null
+ || hardlinkOrCopiedTsFile.getParentFile().getParentFile() == null) {
+ return false;
+ }
+ final PipeTsFileMemResource resource =
+ hardlinkOrCopiedFileToTsFileMemResourceMap.get(getCommonFilePath(hardlinkOrCopiedTsFile));
+ return resource != null && resource.cacheObjectsIfAbsent(hardlinkOrCopiedTsFile);
} finally {
segmentLock.unlock(hardlinkOrCopiedTsFile);
}
@@ -284,9 +298,9 @@
final File hardlinkOrCopiedTsFile) throws IOException {
segmentLock.lock(hardlinkOrCopiedTsFile);
try {
- final PipeTsFileResource resource =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath());
- return resource == null ? null : resource.tryGetDeviceMeasurementsMap();
+ final PipeTsFileMemResource resource =
+ hardlinkOrCopiedFileToTsFileMemResourceMap.get(getCommonFilePath(hardlinkOrCopiedTsFile));
+ return resource == null ? null : resource.tryGetDeviceMeasurementsMap(hardlinkOrCopiedTsFile);
} finally {
segmentLock.unlock(hardlinkOrCopiedTsFile);
}
@@ -296,9 +310,11 @@
final File hardlinkOrCopiedTsFile, final boolean cacheOtherMetadata) throws IOException {
segmentLock.lock(hardlinkOrCopiedTsFile);
try {
- final PipeTsFileResource resource =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath());
- return resource == null ? null : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata);
+ final PipeTsFileMemResource resource =
+ hardlinkOrCopiedFileToTsFileMemResourceMap.get(getCommonFilePath(hardlinkOrCopiedTsFile));
+ return resource == null
+ ? null
+ : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata, hardlinkOrCopiedTsFile);
} finally {
segmentLock.unlock(hardlinkOrCopiedTsFile);
}
@@ -308,38 +324,47 @@
final File hardlinkOrCopiedTsFile) throws IOException {
segmentLock.lock(hardlinkOrCopiedTsFile);
try {
- final PipeTsFileResource resource =
- hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath());
- return resource == null ? null : resource.tryGetMeasurementDataTypeMap();
+ final PipeTsFileMemResource resource =
+ hardlinkOrCopiedFileToTsFileMemResourceMap.get(getCommonFilePath(hardlinkOrCopiedTsFile));
+ return resource == null
+ ? null
+ : resource.tryGetMeasurementDataTypeMap(hardlinkOrCopiedTsFile);
} finally {
segmentLock.unlock(hardlinkOrCopiedTsFile);
}
}
- public void pinTsFileResource(final TsFileResource resource, final boolean withMods)
+ public void pinTsFileResource(
+ final TsFileResource resource, final boolean withMods, final String pipeName)
throws IOException {
- increaseFileReference(resource.getTsFile(), true, resource);
+ increaseFileReference(resource.getTsFile(), true, pipeName);
if (withMods && resource.getModFile().exists()) {
- increaseFileReference(new File(resource.getModFile().getFilePath()), false, null);
+ increaseFileReference(new File(resource.getModFile().getFilePath()), false, pipeName);
}
}
- public void unpinTsFileResource(final TsFileResource resource) throws IOException {
- final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile());
- decreaseFileReference(pinnedFile);
+ public void unpinTsFileResource(final TsFileResource resource, final String pipeName)
+ throws IOException {
+ final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile(), pipeName);
+ decreaseFileReference(pinnedFile, pipeName);
final File modFile = new File(pinnedFile + ModificationFile.FILE_SUFFIX);
if (modFile.exists()) {
- decreaseFileReference(modFile);
+ decreaseFileReference(modFile, pipeName);
}
}
- public int getLinkedTsfileCount() {
- return hardlinkOrCopiedFileToPipeTsFileResourceMap.size();
+ public int getLinkedTsFileCount(final @Nonnull String pipeName) {
+ return hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .size();
}
- public long getTotalLinkedTsfileSize() {
- return hardlinkOrCopiedFileToPipeTsFileResourceMap.values().stream()
+ public long getTotalLinkedTsFileSize(final @Nonnull String pipeName) {
+ return hardlinkOrCopiedFileToPipeTsFileResourceMap
+ .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>())
+ .values()
+ .stream()
.mapToLong(
resource -> {
try {
@@ -351,47 +376,4 @@
})
.sum();
}
-
- /**
- * Get the total size of linked TsFiles whose original TsFile is deleted (by compaction or else)
- */
- public long getTotalLinkedButDeletedTsfileSize() {
- try {
- return hardlinkOrCopiedFileToPipeTsFileResourceMap.values().parallelStream()
- .filter(PipeTsFileResource::isOriginalTsFileDeleted)
- .mapToLong(
- resource -> {
- try {
- return resource.getFileSize();
- } catch (Exception e) {
- LOGGER.warn(
- "failed to get file size of linked but deleted TsFile {}: ", resource, e);
- return 0;
- }
- })
- .sum();
- } catch (final Exception e) {
- LOGGER.warn("failed to get total size of linked but deleted TsFiles: ", e);
- return 0;
- }
- }
-
- public long getTotalLinkedButDeletedTsFileResourceRamSize() {
- long totalLinkedButDeletedTsfileResourceRamSize = 0;
- try {
- for (final Map.Entry<String, PipeTsFileResource> resourceEntry :
- hardlinkOrCopiedFileToPipeTsFileResourceMap.entrySet()) {
- final PipeTsFileResource pipeTsFileResource = resourceEntry.getValue();
- // If the original TsFile is not deleted, the memory of the resource is not counted
- // because the memory of the resource is controlled by TsFileResourceManager.
- if (pipeTsFileResource.isOriginalTsFileDeleted()) {
- totalLinkedButDeletedTsfileResourceRamSize += pipeTsFileResource.getTsFileResourceSize();
- }
- }
- return totalLinkedButDeletedTsfileResourceRamSize;
- } catch (final Exception e) {
- LOGGER.warn("failed to get total size of linked but deleted TsFiles resource ram size: ", e);
- return totalLinkedButDeletedTsfileResourceRamSize;
- }
- }
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java
deleted file mode 100644
index 9d1e530..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal;
-
-import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException;
-import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-public abstract class PipeWALResource implements Closeable {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResource.class);
-
- protected final WALEntryHandler walEntryHandler;
-
- private final AtomicInteger referenceCount;
-
- public static final long WAL_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20;
- private final AtomicLong lastLogicalPinTime;
- private final AtomicBoolean isPhysicallyPinned;
-
- protected PipeWALResource(WALEntryHandler walEntryHandler) {
- this.walEntryHandler = walEntryHandler;
-
- referenceCount = new AtomicInteger(0);
-
- lastLogicalPinTime = new AtomicLong(0);
- isPhysicallyPinned = new AtomicBoolean(false);
- }
-
- public final void pin() throws PipeRuntimeNonCriticalException {
- if (referenceCount.get() == 0) {
- if (!isPhysicallyPinned.get()) {
- try {
- pinInternal();
- } catch (MemTablePinException e) {
- throw new PipeRuntimeNonCriticalException(
- String.format(
- "failed to pin wal %d, because %s",
- walEntryHandler.getMemTableId(), e.getMessage()));
- }
- isPhysicallyPinned.set(true);
- LOGGER.info("wal {} is pinned by pipe engine", walEntryHandler.getMemTableId());
- } // else means the wal is already pinned, do nothing
-
- // no matter the wal is pinned or not, update the last pin time
- lastLogicalPinTime.set(System.currentTimeMillis());
- }
-
- referenceCount.incrementAndGet();
- }
-
- protected abstract void pinInternal()
- throws MemTablePinException, PipeRuntimeNonCriticalException;
-
- public final void unpin() throws PipeRuntimeNonCriticalException {
- final int finalReferenceCount = referenceCount.get();
-
- if (finalReferenceCount == 1) {
- unpinPhysicallyIfOutOfTimeToLive();
- } else if (finalReferenceCount < 1) {
- throw new PipeRuntimeCriticalException(
- String.format(
- "wal %d is unpinned more than pinned, this should not happen",
- walEntryHandler.getMemTableId()));
- }
-
- referenceCount.decrementAndGet();
- }
-
- protected abstract void unpinInternal()
- throws MemTablePinException, PipeRuntimeNonCriticalException;
-
- /**
- * Invalidate the wal if it is unpinned and out of time to live.
- *
- * @return true if the wal is invalidated, false otherwise
- */
- public final boolean invalidateIfPossible() {
- if (referenceCount.get() > 0) {
- return false;
- }
-
- // referenceCount.get() == 0
- return unpinPhysicallyIfOutOfTimeToLive();
- }
-
- /**
- * Unpin the wal if it is out of time to live.
- *
- * @return true if the wal is unpinned physically (then it can be invalidated), false otherwise
- * @throws PipeRuntimeNonCriticalException if failed to unpin WAL of memtable.
- */
- private boolean unpinPhysicallyIfOutOfTimeToLive() {
- if (isPhysicallyPinned.get()) {
- if (System.currentTimeMillis() - lastLogicalPinTime.get() > WAL_MIN_TIME_TO_LIVE_IN_MS) {
- try {
- unpinInternal();
- } catch (MemTablePinException e) {
- throw new PipeRuntimeNonCriticalException(
- String.format(
- "failed to unpin wal %d, because %s",
- walEntryHandler.getMemTableId(), e.getMessage()));
- }
- isPhysicallyPinned.set(false);
- LOGGER.info(
- "wal {} is unpinned by pipe engine when checking time to live",
- walEntryHandler.getMemTableId());
- return true;
- } else {
- return false;
- }
- } else {
- LOGGER.info(
- "wal {} is not pinned physically when checking time to live",
- walEntryHandler.getMemTableId());
- return true;
- }
- }
-
- @Override
- public final void close() {
- if (isPhysicallyPinned.get()) {
- try {
- unpinInternal();
- } catch (MemTablePinException e) {
- LOGGER.error(
- "failed to unpin wal {} when closing pipe wal resource, because {}",
- walEntryHandler.getMemTableId(),
- e.getMessage());
- }
- isPhysicallyPinned.set(false);
- LOGGER.info(
- "wal {} is unpinned by pipe engine when closing pipe wal resource",
- walEntryHandler.getMemTableId());
- }
-
- referenceCount.set(0);
- }
-
- public int getReferenceCount() {
- return referenceCount.get();
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java
deleted file mode 100644
index 9c51d79..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal;
-
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
-import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ConcurrentModificationException;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.ReentrantLock;
-
-public abstract class PipeWALResourceManager {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResourceManager.class);
-
- protected final Map<Long, PipeWALResource> memtableIdToPipeWALResourceMap;
-
- private static final int SEGMENT_LOCK_COUNT = 32;
- private final ReentrantLock[] memTableIdSegmentLocks;
-
- protected PipeWALResourceManager() {
- // memTableIdToPipeWALResourceMap can be concurrently accessed by multiple threads
- memtableIdToPipeWALResourceMap = new ConcurrentHashMap<>();
-
- memTableIdSegmentLocks = new ReentrantLock[SEGMENT_LOCK_COUNT];
- for (int i = 0; i < SEGMENT_LOCK_COUNT; i++) {
- memTableIdSegmentLocks[i] = new ReentrantLock();
- }
-
- PipeDataNodeAgent.runtime()
- .registerPeriodicalJob(
- "PipeWALResourceManager#ttlCheck()",
- this::ttlCheck,
- Math.max(PipeWALResource.WAL_MIN_TIME_TO_LIVE_IN_MS / 1000, 1));
- }
-
- @SuppressWarnings("java:S2222")
- private void ttlCheck() {
- final Iterator<Map.Entry<Long, PipeWALResource>> iterator =
- memtableIdToPipeWALResourceMap.entrySet().iterator();
- final Optional<Logger> logger =
- PipeDataNodeResourceManager.log()
- .schedule(
- PipeWALResourceManager.class,
- PipeConfig.getInstance().getPipeWalPinMaxLogNumPerRound(),
- PipeConfig.getInstance().getPipeWalPinMaxLogIntervalRounds(),
- memtableIdToPipeWALResourceMap.size());
-
- final StringBuilder logBuilder = new StringBuilder();
- try {
- while (iterator.hasNext()) {
- final Map.Entry<Long, PipeWALResource> entry = iterator.next();
- final ReentrantLock lock =
- memTableIdSegmentLocks[(int) (entry.getKey() % SEGMENT_LOCK_COUNT)];
-
- lock.lock();
- try {
- if (entry.getValue().invalidateIfPossible()) {
- iterator.remove();
- } else {
- logBuilder.append(
- String.format(
- "<%d , %d times> ", entry.getKey(), entry.getValue().getReferenceCount()));
- }
- } finally {
- lock.unlock();
- }
- }
- } catch (final ConcurrentModificationException e) {
- LOGGER.error(
- "Concurrent modification issues happened, skipping the WAL in this round of ttl check",
- e);
- } finally {
- if (logBuilder.length() > 0) {
- logger.ifPresent(l -> l.info("WAL {}are still referenced", logBuilder));
- }
- }
- }
-
- public final void pin(final WALEntryHandler walEntryHandler) throws IOException {
- final long memTableId = walEntryHandler.getMemTableId();
- final ReentrantLock lock = memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)];
-
- lock.lock();
- try {
- pinInternal(memTableId, walEntryHandler);
- } finally {
- lock.unlock();
- }
- }
-
- protected abstract void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler)
- throws IOException;
-
- public final void unpin(final WALEntryHandler walEntryHandler) throws IOException {
- final long memTableId = walEntryHandler.getMemTableId();
- final ReentrantLock lock = memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)];
-
- lock.lock();
- try {
- unpinInternal(memTableId, walEntryHandler);
- } finally {
- lock.unlock();
- }
- }
-
- protected abstract void unpinInternal(
- final long memTableId, final WALEntryHandler walEntryHandler) throws IOException;
-
- public int getPinnedWalCount() {
- return Objects.nonNull(memtableIdToPipeWALResourceMap)
- ? memtableIdToPipeWALResourceMap.size()
- : 0;
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java
deleted file mode 100644
index f1ad513..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal.hardlink;
-
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-public class PipeWALHardlinkResource extends PipeWALResource {
-
- private final PipeWALHardlinkResourceManager resourceManager;
-
- protected PipeWALHardlinkResource(
- WALEntryHandler walEntryHandler, PipeWALHardlinkResourceManager resourceManager) {
- super(walEntryHandler);
- this.resourceManager = resourceManager;
- }
-
- @Override
- protected void pinInternal() throws MemTablePinException {
- // TODO: hardlink
- walEntryHandler.pinMemTable();
- }
-
- @Override
- protected void unpinInternal() throws MemTablePinException {
- // TODO: hardlink
- walEntryHandler.unpinMemTable();
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java
deleted file mode 100644
index eebf766..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal.hardlink;
-
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.commons.utils.FileUtils;
-import org.apache.iotdb.commons.utils.TestOnly;
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.util.HashMap;
-import java.util.Map;
-
-public class PipeWALHardlinkResourceManager extends PipeWALResourceManager {
-
- @Override
- protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) {
- memtableIdToPipeWALResourceMap
- .computeIfAbsent(memTableId, id -> new PipeWALHardlinkResource(walEntryHandler, this))
- .pin();
- }
-
- @Override
- protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) {
- memtableIdToPipeWALResourceMap.get(memTableId).unpin();
- }
-
- //////////////////////////// hardlink related ////////////////////////////
-
- private final Map<String, Integer> hardlinkToReferenceMap = new HashMap<>();
-
- /**
- * given a file, create a hardlink, maintain a reference count for the hardlink, and return the
- * hardlink.
- *
- * <p>if the given file is already a hardlink, increase its reference count and return it.
- *
- * <p>if the given file is a wal, create a hardlink in pipe dir, increase the reference count of
- * the hardlink and return it.
- *
- * @param file wal file. can be original file or the hardlink of original file
- * @return the hardlink
- * @throws IOException when create hardlink failed
- */
- public synchronized File increaseFileReference(final File file) throws IOException {
- // if the file is already a hardlink, just increase reference count and return it
- if (increaseReferenceIfExists(file.getPath())) {
- return file;
- }
-
- // if the file is not a hardlink, check if there is a related hardlink in pipe dir. if so,
- // increase reference count and return it.
- final File hardlink = getHardlinkInPipeWALDir(file);
- if (increaseReferenceIfExists(hardlink.getPath())) {
- return hardlink;
- }
-
- // if the file is a wal, and there is no related hardlink in pipe dir, create a hardlink to pipe
- // dir, maintain a reference count for the hardlink, and return the hardlink.
- hardlinkToReferenceMap.put(hardlink.getPath(), 1);
- return FileUtils.createHardLink(file, hardlink);
- }
-
- private boolean increaseReferenceIfExists(final String path) {
- hardlinkToReferenceMap.computeIfPresent(path, (k, v) -> v + 1);
- return hardlinkToReferenceMap.containsKey(path);
- }
-
- // TODO: Check me! Make sure the file is not a hardlink.
- // TODO: IF user specify a wal by config, will the method work?
- private static File getHardlinkInPipeWALDir(final File file) throws IOException {
- try {
- return new File(getPipeWALDirPath(file), getRelativeFilePath(file));
- } catch (final Exception e) {
- throw new IOException(
- String.format(
- "failed to get hardlink in pipe dir " + "for file %s, it is not a wal",
- file.getPath()),
- e);
- }
- }
-
- private static String getPipeWALDirPath(File file) throws IOException {
- while (!file.getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) {
- file = file.getParentFile();
- }
-
- return file.getParentFile().getCanonicalPath()
- + File.separator
- + IoTDBConstant.DATA_FOLDER_NAME
- + File.separator
- + PipeConfig.getInstance().getPipeHardlinkBaseDirName()
- + File.separator
- + PipeConfig.getInstance().getPipeHardlinkWALDirName();
- }
-
- private static String getRelativeFilePath(File file) {
- StringBuilder builder = new StringBuilder(file.getName());
- while (!file.getParentFile().getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) {
- file = file.getParentFile();
- builder =
- new StringBuilder(file.getName())
- .append(IoTDBConstant.FILE_NAME_SEPARATOR)
- .append(builder);
- }
- return builder.toString();
- }
-
- /**
- * given a hardlink, decrease its reference count, if the reference count is 0, delete the file.
- * if the given file is not a hardlink, do nothing.
- *
- * @param hardlink the hardlinked file
- * @throws IOException when delete file failed
- */
- public synchronized void decreaseFileReference(final File hardlink) throws IOException {
- final Integer updatedReference =
- hardlinkToReferenceMap.computeIfPresent(
- hardlink.getPath(), (file, reference) -> reference - 1);
-
- if (updatedReference != null && updatedReference == 0) {
- Files.deleteIfExists(hardlink.toPath());
- hardlinkToReferenceMap.remove(hardlink.getPath());
- }
- }
-
- @TestOnly
- public synchronized int getFileReferenceCount(final File hardlink) {
- return hardlinkToReferenceMap.getOrDefault(hardlink.getPath(), 0);
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java
deleted file mode 100644
index e8e03e6..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal.selfhost;
-
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-public class PipeWALSelfHostResource extends PipeWALResource {
-
- public PipeWALSelfHostResource(WALEntryHandler walEntryHandler) {
- super(walEntryHandler);
- }
-
- @Override
- protected void pinInternal() throws MemTablePinException {
- walEntryHandler.pinMemTable();
- }
-
- @Override
- protected void unpinInternal() throws MemTablePinException {
- walEntryHandler.unpinMemTable();
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java
deleted file mode 100644
index c7fe0ac..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource.wal.selfhost;
-
-import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
-public class PipeWALSelfHostResourceManager extends PipeWALResourceManager {
-
- @Override
- protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) {
- memtableIdToPipeWALResourceMap
- .computeIfAbsent(memTableId, id -> new PipeWALSelfHostResource(walEntryHandler))
- .pin();
- }
-
- @Override
- protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) {
- memtableIdToPipeWALResourceMap.get(memTableId).unpin();
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java
index 9eb5446..0be487a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java
@@ -40,6 +40,9 @@
import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot;
import org.apache.iotdb.commons.client.request.AsyncRequestContext;
import org.apache.iotdb.commons.cluster.NodeStatus;
+import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
+import org.apache.iotdb.commons.concurrent.ThreadName;
+import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
import org.apache.iotdb.commons.conf.CommonConfig;
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.conf.ConfigurationFileUtils;
@@ -272,9 +275,17 @@
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -316,6 +327,18 @@
private final CommonConfig commonConfig = CommonDescriptor.getInstance().getConfig();
+ private final ExecutorService schemaExecutor =
+ new WrappedThreadPoolExecutor(
+ 0,
+ IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount(),
+ 0L,
+ TimeUnit.SECONDS,
+ new ArrayBlockingQueue<>(
+ IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount()),
+ new IoTThreadFactory(ThreadName.SCHEMA_PARALLEL_POOL.getName()),
+ ThreadName.SCHEMA_PARALLEL_POOL.getName(),
+ new ThreadPoolExecutor.CallerRunsPolicy());
+
private static final String SYSTEM = "system";
public DataNodeInternalRPCServiceImpl() {
@@ -1071,6 +1094,10 @@
.map(PipeMeta::deserialize4TaskAgent)
.collect(Collectors.toList()));
+ if (Objects.isNull(exceptionMessages)) {
+ return new TPushPipeMetaResp()
+ .setStatus(new TSStatus(TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode()));
+ }
return exceptionMessages.isEmpty()
? new TPushPipeMetaResp()
.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()))
@@ -1347,16 +1374,31 @@
final List<TSStatus> statusList = Collections.synchronizedList(new ArrayList<>());
final AtomicBoolean hasFailure = new AtomicBoolean(false);
- consensusGroupIdList.parallelStream()
- .forEach(
- consensusGroupId -> {
- final TSStatus status = executeOnOneRegion.apply(consensusGroupId);
- if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
- && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) {
- hasFailure.set(true);
- }
- statusList.add(status);
- });
+ final Set<Future<?>> schemaFuture = new HashSet<>();
+
+ consensusGroupIdList.forEach(
+ consensusGroupId ->
+ schemaFuture.add(
+ schemaExecutor.submit(
+ () -> {
+ final TSStatus status = executeOnOneRegion.apply(consensusGroupId);
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()
+ && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) {
+ hasFailure.set(true);
+ }
+ statusList.add(status);
+ })));
+
+ for (final Future<?> future : schemaFuture) {
+ try {
+ future.get();
+ } catch (final ExecutionException | InterruptedException e) {
+ LOGGER.warn("Exception occurs when executing internal schema task: ", e);
+ statusList.add(
+ new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode())
+ .setMessage(e.toString()));
+ }
+ }
if (hasFailure.get()) {
return RpcUtils.getStatus(statusList);
@@ -1375,15 +1417,30 @@
final List<TSStatus> statusList = Collections.synchronizedList(new ArrayList<>());
final AtomicBoolean hasFailure = new AtomicBoolean(false);
- consensusGroupIdList.parallelStream()
- .forEach(
- consensusGroupId -> {
- final TSStatus status = executeOnOneRegion.apply(consensusGroupId);
- if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- hasFailure.set(true);
- }
- statusList.add(status);
- });
+ final Set<Future<?>> schemaFuture = new HashSet<>();
+
+ consensusGroupIdList.forEach(
+ consensusGroupId ->
+ schemaFuture.add(
+ schemaExecutor.submit(
+ () -> {
+ final TSStatus status = executeOnOneRegion.apply(consensusGroupId);
+ if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+ hasFailure.set(true);
+ }
+ statusList.add(status);
+ })));
+
+ for (final Future<?> future : schemaFuture) {
+ try {
+ future.get();
+ } catch (final ExecutionException | InterruptedException e) {
+ LOGGER.warn("Exception occurs when executing internal schema task: ", e);
+ statusList.add(
+ new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode())
+ .setMessage(e.toString()));
+ }
+ }
if (hasFailure.get()) {
return RpcUtils.getStatus(statusList);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/pipe/ShowPipeTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/pipe/ShowPipeTask.java
index 687a438..16ee005 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/pipe/ShowPipeTask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/pipe/ShowPipeTask.java
@@ -20,7 +20,7 @@
package org.apache.iotdb.db.queryengine.plan.execution.config.sys.pipe;
import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo;
-import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeMetrics;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.queryengine.common.header.ColumnHeader;
import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant;
import org.apache.iotdb.db.queryengine.common.header.DatasetHeader;
@@ -97,7 +97,7 @@
if (remainingEventCount == -1 && remainingTime == -1) {
final Pair<Long, Double> remainingEventAndTime =
- PipeDataNodeRemainingEventAndTimeMetrics.getInstance()
+ PipeDataNodeSinglePipeMetrics.getInstance()
.getRemainingEventAndTime(tPipeInfo.getId(), tPipeInfo.getCreationTime());
remainingEventCount = remainingEventAndTime.getLeft();
remainingTime = remainingEventAndTime.getRight();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/DataNodeShutdownHook.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/DataNodeShutdownHook.java
index 13b87f5..3c4cc1b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/DataNodeShutdownHook.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/DataNodeShutdownHook.java
@@ -25,10 +25,14 @@
import org.apache.iotdb.commons.concurrent.ThreadName;
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.pipe.agent.runtime.PipePeriodicalJobExecutor;
+import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.consensus.ConsensusFactory;
import org.apache.iotdb.consensus.exception.ConsensusException;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.consensus.DataRegionConsensusImpl;
+import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeRemainingEventAndTimeOperator;
+import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics;
import org.apache.iotdb.db.protocol.client.ConfigNodeClient;
import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager;
import org.apache.iotdb.db.protocol.client.ConfigNodeInfo;
@@ -42,6 +46,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.Map;
+
public class DataNodeShutdownHook extends Thread {
private static final Logger logger = LoggerFactory.getLogger(DataNodeShutdownHook.class);
@@ -86,6 +92,27 @@
triggerSnapshotForAllDataRegion();
}
+ long startTime = System.currentTimeMillis();
+ if (PipeDataNodeAgent.task().getPipeCount() != 0) {
+ for (Map.Entry<String, PipeDataNodeRemainingEventAndTimeOperator> entry :
+ PipeDataNodeSinglePipeMetrics.getInstance().remainingEventAndTimeOperatorMap.entrySet()) {
+ while (entry.getValue().getRemainingNonHeartbeatEvents() > 0) {
+ if (System.currentTimeMillis() - startTime
+ > PipeConfig.getInstance().getPipeMaxWaitFinishTime()) {
+ break;
+ }
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.info("Interrupted when waiting for pipe to finish");
+ }
+ }
+ }
+ }
+ // Persist progress index before shutdown to ensure accurate recovery after restart
+ PipeDataNodeAgent.task().persistAllProgressIndexLocally();
+
// Shutdown pipe progressIndex background service
PipePeriodicalJobExecutor.shutdownBackgroundService();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java
index f923fb2..6fbf870 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java
@@ -38,7 +38,6 @@
import org.apache.iotdb.commons.exception.ShutdownException;
import org.apache.iotdb.commons.exception.StartupException;
import org.apache.iotdb.commons.file.SystemFileFactory;
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
import org.apache.iotdb.commons.schema.ttl.TTLCache;
import org.apache.iotdb.commons.service.IService;
import org.apache.iotdb.commons.service.ServiceType;
@@ -54,7 +53,6 @@
import org.apache.iotdb.db.exception.WriteProcessRejectException;
import org.apache.iotdb.db.exception.load.LoadReadOnlyException;
import org.apache.iotdb.db.exception.runtime.StorageEngineFailureException;
-import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
import org.apache.iotdb.db.queryengine.plan.analyze.cache.schema.DataNodeTTLCache;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.load.LoadTsFilePieceNode;
import org.apache.iotdb.db.queryengine.plan.scheduler.load.LoadTsFileScheduler;
@@ -219,12 +217,6 @@
LOGGER.info(
"Storage Engine recover cost: {}s.",
(System.currentTimeMillis() - startRecoverTime) / 1000);
-
- PipeDataNodeAgent.runtime()
- .registerPeriodicalJob(
- "StorageEngine#operateFlush",
- () -> operateFlush(new TFlushReq()),
- PipeConfig.getInstance().getPipeStorageEngineFlushTimeIntervalMs() / 1000);
},
ThreadName.STORAGE_ENGINE_RECOVER_TRIGGER.getName());
recoverEndTrigger.start();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java
index 5e9b25b..0d2737a 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java
@@ -1898,7 +1898,7 @@
}
/** close all working tsfile processors */
- private List<Future<?>> asyncCloseAllWorkingTsFileProcessors() {
+ public List<Future<?>> asyncCloseAllWorkingTsFileProcessors() {
writeLock("asyncCloseAllWorkingTsFileProcessors");
List<Future<?>> futures = new ArrayList<>();
int count = 0;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java
index c4fba1f..992b540 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java
@@ -227,9 +227,9 @@
new File(targetTsFile.getPath() + ModificationFile.FILE_SUFFIX).toPath(),
new File(sourceTsFile.getPath() + ModificationFile.FILE_SUFFIX).toPath());
}
- targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndexAfterClose());
+ targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndex());
targetFile.deserialize();
- targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndexAfterClose());
+ targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndex());
}
private boolean recoverTaskInfoFromLogFile() throws IOException {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java
index afbea98..3b251a0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java
@@ -288,7 +288,7 @@
List<TsFileResource> unseqResources) {
for (TsFileResource targetResource : targetResources) {
for (TsFileResource unseqResource : unseqResources) {
- targetResource.updateProgressIndex(unseqResource.getMaxProgressIndexAfterClose());
+ targetResource.updateProgressIndex(unseqResource.getMaxProgressIndex());
targetResource.setGeneratedByPipe(
unseqResource.isGeneratedByPipe() && targetResource.isGeneratedByPipe());
targetResource.setGeneratedByPipeConsensus(
@@ -296,7 +296,7 @@
&& targetResource.isGeneratedByPipeConsensus());
}
for (TsFileResource seqResource : seqResources) {
- targetResource.updateProgressIndex(seqResource.getMaxProgressIndexAfterClose());
+ targetResource.updateProgressIndex(seqResource.getMaxProgressIndex());
targetResource.setGeneratedByPipe(
seqResource.isGeneratedByPipe() && targetResource.isGeneratedByPipe());
targetResource.setGeneratedByPipeConsensus(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java
index c91cb78..e4f095b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java
@@ -321,10 +321,7 @@
}
PipeInsertionDataNodeListener.getInstance()
.listenToInsertNode(
- dataRegionInfo.getDataRegion().getDataRegionId(),
- walFlushListener.getWalEntryHandler(),
- insertRowNode,
- tsFileResource);
+ dataRegionInfo.getDataRegion().getDataRegionId(), insertRowNode, tsFileResource);
int pointInserted;
if (insertRowNode.isAligned()) {
@@ -422,10 +419,7 @@
}
PipeInsertionDataNodeListener.getInstance()
.listenToInsertNode(
- dataRegionInfo.getDataRegion().getDataRegionId(),
- walFlushListener.getWalEntryHandler(),
- insertRowsNode,
- tsFileResource);
+ dataRegionInfo.getDataRegion().getDataRegionId(), insertRowsNode, tsFileResource);
int pointInserted = 0;
for (InsertRowNode insertRowNode : insertRowsNode.getInsertRowNodeList()) {
@@ -540,10 +534,7 @@
}
PipeInsertionDataNodeListener.getInstance()
.listenToInsertNode(
- dataRegionInfo.getDataRegion().getDataRegionId(),
- walFlushListener.getWalEntryHandler(),
- insertTabletNode,
- tsFileResource);
+ dataRegionInfo.getDataRegion().getDataRegionId(), insertTabletNode, tsFileResource);
int pointInserted;
try {
@@ -1148,13 +1139,6 @@
IMemTable tmpMemTable = workMemTable == null ? new NotifyFlushMemTable() : workMemTable;
try {
- PipeInsertionDataNodeListener.getInstance()
- .listenToTsFile(
- dataRegionInfo.getDataRegion().getDataRegionId(),
- tsFileResource,
- false,
- tmpMemTable.isTotallyGeneratedByPipe());
-
// When invoke closing TsFile after insert data to memTable, we shouldn't flush until invoke
// flushing memTable in System module.
Future<?> future = addAMemtableIntoFlushingList(tmpMemTable);
@@ -1586,6 +1570,16 @@
logger.debug("Start to end file {}", tsFileResource);
}
writer.endFile();
+
+ // Listen after "endFile" to avoid unnecessarily waiting for the tsFile to close,
+ // and before resource serialization to avoid a missing hardlink after restart
+ PipeInsertionDataNodeListener.getInstance()
+ .listenToTsFile(
+ dataRegionInfo.getDataRegion().getDataRegionId(),
+ tsFileResource,
+ false,
+ workMemTable != null && workMemTable.isTotallyGeneratedByPipe());
+
tsFileResource.serialize();
FileTimeIndexCacheRecorder.getInstance().logFileTimeIndex(tsFileResource);
if (logger.isDebugEnabled()) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java
index 734c144..a9a9878 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java
@@ -20,6 +20,7 @@
package org.apache.iotdb.db.storageengine.dataregion.tsfile;
import org.apache.iotdb.commons.utils.TimePartitionUtils;
+import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndexCacheRecorder;
import org.apache.iotdb.db.storageengine.rescon.memory.TsFileResourceManager;
@@ -276,6 +277,15 @@
} finally {
writeUnlock();
}
+
+ PipeDataNodeResourceManager.compaction()
+ .emitResult(
+ storageGroupName,
+ dataRegionId,
+ timePartition,
+ seqFileResources,
+ unseqFileResources,
+ targetFileResources);
}
public boolean contains(TsFileResource tsFileResource, boolean sequence) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java
index 7761ea5..ee05d2f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java
@@ -1218,14 +1218,6 @@
.updateProgressIndex(getDataRegionId(), getTsFilePath(), maxProgressIndex);
}
- public ProgressIndex getMaxProgressIndexAfterClose() throws IllegalStateException {
- if (getStatus().equals(TsFileResourceStatus.UNCLOSED)) {
- throw new IllegalStateException(
- "Should not get progress index from a unclosing TsFileResource.");
- }
- return getMaxProgressIndex();
- }
-
public ProgressIndex getMaxProgressIndex() {
return maxProgressIndex == null ? MinimumProgressIndex.INSTANCE : maxProgressIndex;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java
index edc3060..36c404f 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java
@@ -342,7 +342,6 @@
info.metaData.add(size, searchIndex, walEntry.getMemTableId());
info.memTableId2WalDiskUsage.compute(
walEntry.getMemTableId(), (k, v) -> v == null ? size : v + size);
- walEntry.getWalFlushListener().getWalEntryHandler().setSize(size);
info.fsyncListeners.add(walEntry.getWalFlushListener());
}
@@ -593,13 +592,8 @@
// notify all waiting listeners
if (forceSuccess) {
- long position = lastFsyncPosition;
for (WALFlushListener fsyncListener : info.fsyncListeners) {
fsyncListener.succeed();
- if (fsyncListener.getWalEntryHandler() != null) {
- fsyncListener.getWalEntryHandler().setEntryPosition(walFileVersionId, position);
- position += fsyncListener.getWalEntryHandler().getSize();
- }
}
lastFsyncPosition = currentWALFileWriter.originalSize();
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java
index a8e94e2..18304e6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java
@@ -77,14 +77,14 @@
} else {
throw new RuntimeException("Unknown WALEntry type");
}
- walFlushListener = new WALFlushListener(wait, value);
+ walFlushListener = new WALFlushListener(wait);
}
protected WALEntry(WALEntryType type, long memTableId, WALEntryValue value, boolean wait) {
this.type = type;
this.memTableId = memTableId;
this.value = value;
- this.walFlushListener = new WALFlushListener(wait, value);
+ this.walFlushListener = new WALFlushListener(wait);
}
public abstract void serialize(IWALByteBufferView buffer);
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java
index afa6651..6f801a7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java
@@ -25,11 +25,9 @@
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.service.metrics.WritingMetrics;
import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
import org.apache.iotdb.db.storageengine.dataregion.wal.io.CheckpointWriter;
import org.apache.iotdb.db.storageengine.dataregion.wal.io.ILogWriter;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.CheckpointFileUtils;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -177,9 +175,7 @@
return;
}
memTableInfo.setFlushed();
- if (!memTableInfo.isPinned()) {
- memTableId2Info.remove(memTableId);
- }
+ memTableId2Info.remove(memTableId);
Checkpoint checkpoint =
new Checkpoint(
CheckpointType.FLUSH_MEMORY_TABLE, Collections.singletonList(memTableInfo));
@@ -261,71 +257,9 @@
// endregion
- // region methods for pipe
- /**
- * Pin the wal files of the given memory table. Notice: cannot pin one memTable too long,
- * otherwise the wal disk usage may too large.
- *
- * @throws MemTablePinException If the memTable has been flushed
- */
- public void pinMemTable(long memTableId) throws MemTablePinException {
- infoLock.lock();
- try {
- if (!memTableId2Info.containsKey(memTableId)) {
- throw new MemTablePinException(
- String.format(
- "Fail to pin memTable-%d because this memTable doesn't exist in the wal.",
- memTableId));
- }
- MemTableInfo memTableInfo = memTableId2Info.get(memTableId);
- if (!memTableInfo.isPinned()) {
- WALInsertNodeCache.getInstance().addMemTable(memTableId);
- }
- memTableInfo.pin();
- } finally {
- infoLock.unlock();
- }
- }
-
- /**
- * Unpin the wal files of the given memory table.
- *
- * @throws MemTablePinException If there aren't corresponding pin operations
- */
- public void unpinMemTable(long memTableId) throws MemTablePinException {
- infoLock.lock();
- try {
- if (!memTableId2Info.containsKey(memTableId)) {
- throw new MemTablePinException(
- String.format(
- "Fail to unpin memTable-%d because this memTable doesn't exist in the wal.",
- memTableId));
- }
- if (!memTableId2Info.get(memTableId).isPinned()) {
- throw new MemTablePinException(
- String.format(
- "Fail to unpin memTable-%d because this memTable hasn't been pinned.", memTableId));
- }
- MemTableInfo memTableInfo = memTableId2Info.get(memTableId);
- memTableInfo.unpin();
- if (!memTableInfo.isPinned()) {
- WALInsertNodeCache.getInstance().removeMemTable(memTableId);
- if (memTableInfo.isFlushed()) {
- memTableId2Info.remove(memTableId);
- }
- }
- } finally {
- infoLock.unlock();
- }
- }
-
- // endregion
-
- /** Get MemTableInfo of oldest unpinned MemTable, whose first version id is smallest. */
- public MemTableInfo getOldestUnpinnedMemTableInfo() {
+ public MemTableInfo getOldestMemTableInfo() {
// find oldest memTable
return activeOrPinnedMemTables().stream()
- .filter(memTableInfo -> !memTableInfo.isPinned())
.min(Comparator.comparingLong(MemTableInfo::getMemTableId))
.orElse(null);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java
index 59c2a3b..984006b 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java
@@ -47,8 +47,7 @@
// memTable
private IMemTable memTable;
- // memTable pin count
- private int pinCount;
+
// memTable is flushed or not
private boolean flushed;
// data region id
@@ -116,22 +115,6 @@
return memTable;
}
- public void pin() {
- this.pinCount++;
- }
-
- public void unpin() {
- this.pinCount--;
- }
-
- public boolean isPinned() {
- return pinCount > 0;
- }
-
- public int getPinCount() {
- return pinCount;
- }
-
public boolean isFlushed() {
return flushed;
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java
index b03b27a..2f257da 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java
@@ -20,7 +20,6 @@
package org.apache.iotdb.db.storageengine.dataregion.wal.io;
import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryPosition;
import java.io.Closeable;
import java.io.DataInputStream;
@@ -50,18 +49,6 @@
}
}
- public WALByteBufReader(WALEntryPosition walEntryPosition) throws IOException {
- WALInputStream walInputStream = walEntryPosition.openReadFileStream();
- try {
- this.logStream = new DataInputStream(walInputStream);
- this.metaData = walInputStream.getWALMetaData();
- this.sizeIterator = metaData.getBuffersSize().iterator();
- } catch (Exception e) {
- walInputStream.close();
- throw e;
- }
- }
-
/** Like {@link Iterator#hasNext()}. */
public boolean hasNext() {
return sizeIterator.hasNext();
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java
index 38b69f1..dfa7bf6 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java
@@ -41,9 +41,9 @@
public WALFakeNode(Status status, Exception cause) {
this.status = status;
- this.successListener = new WALFlushListener(false, null);
+ this.successListener = new WALFlushListener(false);
this.successListener.succeed();
- this.failListener = new WALFlushListener(false, null);
+ this.failListener = new WALFlushListener(false);
this.failListener.fail(cause);
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java
index 35432a8..003c747 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java
@@ -49,7 +49,6 @@
import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointManager;
import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointType;
import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.MemTableInfo;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALByteBufReader;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileStatus;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileUtils;
@@ -57,7 +56,6 @@
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.AbstractResultListener.Status;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener;
-import org.apache.commons.lang3.StringUtils;
import org.apache.tsfile.fileSystem.FSFactoryProducer;
import org.apache.tsfile.utils.TsFileUtils;
import org.slf4j.Logger;
@@ -183,7 +181,6 @@
buffer.write(walEntry);
// set handler for pipe
- walEntry.getWalFlushListener().getWalEntryHandler().setWalNode(this, walEntry.getMemTableId());
return walEntry.getWalFlushListener();
}
@@ -228,25 +225,6 @@
// region methods for pipe
- /**
- * Pin the wal files of the given memory table. Notice: cannot pin one memTable too long,
- * otherwise the wal disk usage may too large.
- *
- * @throws MemTablePinException If the memTable has been flushed
- */
- public void pinMemTable(long memTableId) throws MemTablePinException {
- checkpointManager.pinMemTable(memTableId);
- }
-
- /**
- * Unpin the wal files of the given memory table.
- *
- * @throws MemTablePinException If there aren't corresponding pin operations
- */
- public void unpinMemTable(long memTableId) throws MemTablePinException {
- checkpointManager.unpinMemTable(memTableId);
- }
-
// endregion
// region Task to delete outdated .wal files
@@ -270,8 +248,6 @@
// the effective information ratio
private double effectiveInfoRatio = 1.0d;
- private List<Long> pinnedMemTableIds;
-
private int fileIndexAfterFilterSafelyDeleteIndex = Integer.MAX_VALUE;
private List<Long> successfullyDeleted;
private long deleteFileSize;
@@ -297,7 +273,6 @@
this.sortedWalFilesExcludingLast =
Arrays.copyOfRange(allWalFilesOfOneNode, 0, allWalFilesOfOneNode.length - 1);
this.activeOrPinnedMemTables = checkpointManager.activeOrPinnedMemTables();
- this.pinnedMemTableIds = initPinnedMemTableIds();
this.fileIndexAfterFilterSafelyDeleteIndex = initFileIndexAfterFilterSafelyDeleteIndex();
this.successfullyDeleted = new ArrayList<>();
this.deleteFileSize = 0;
@@ -318,20 +293,6 @@
}
}
- private List<Long> initPinnedMemTableIds() {
- List<MemTableInfo> memTableInfos = checkpointManager.activeOrPinnedMemTables();
- if (memTableInfos.isEmpty()) {
- return new ArrayList<>();
- }
- List<Long> pinnedIds = new ArrayList<>();
- for (MemTableInfo memTableInfo : memTableInfos) {
- if (memTableInfo.isFlushed() && memTableInfo.isPinned()) {
- pinnedIds.add(memTableInfo.getMemTableId());
- }
- }
- return pinnedIds;
- }
-
@Override
public void run() {
// The intent of the loop execution here is to try to get as many memTable flush or snapshot
@@ -365,7 +326,7 @@
private void updateEffectiveInfoRationAndUpdateMetric() {
// calculate effective information ratio
long costOfActiveMemTables = checkpointManager.getTotalCostOfActiveMemTables();
- MemTableInfo oldestUnpinnedMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo();
+ MemTableInfo oldestUnpinnedMemTableInfo = checkpointManager.getOldestMemTableInfo();
long avgFileSize =
getFileNum() != 0
? getDiskUsage() / getFileNum()
@@ -389,45 +350,10 @@
}
private void summarizeExecuteResult() {
- if (!pinnedMemTableIds.isEmpty()
- || fileIndexAfterFilterSafelyDeleteIndex < sortedWalFilesExcludingLast.length) {
- if (logger.isDebugEnabled()) {
- StringBuilder summary =
- new StringBuilder(
- String.format(
- "wal node-%s delete outdated files summary:the range is: [%d,%d], delete successful is [%s], safely delete file index is: [%s].The following reasons influenced the result: %s",
- identifier,
- WALFileUtils.parseVersionId(sortedWalFilesExcludingLast[0].getName()),
- WALFileUtils.parseVersionId(
- sortedWalFilesExcludingLast[sortedWalFilesExcludingLast.length - 1]
- .getName()),
- StringUtils.join(successfullyDeleted, ","),
- fileIndexAfterFilterSafelyDeleteIndex,
- System.lineSeparator()));
-
- if (!pinnedMemTableIds.isEmpty()) {
- summary
- .append("- MemTable has been flushed but pinned by PIPE, the MemTableId list is : ")
- .append(StringUtils.join(pinnedMemTableIds, ","))
- .append(".")
- .append(System.lineSeparator());
- }
- if (fileIndexAfterFilterSafelyDeleteIndex < sortedWalFilesExcludingLast.length) {
- summary.append(
- String.format(
- "- The data in the wal file was not consumed by the consensus group,current search index is %d, safely delete index is %d",
- getCurrentSearchIndex(), safelyDeletedSearchIndex));
- }
- String summaryLog = summary.toString();
- logger.debug(summaryLog);
- }
-
- } else {
- logger.debug(
- "Successfully delete {} outdated wal files for wal node-{}",
- successfullyDeleted.size(),
- identifier);
- }
+ logger.debug(
+ "Successfully delete {} outdated wal files for wal node-{}",
+ successfullyDeleted.size(),
+ identifier);
}
/** Delete obsolete wal files while recording which succeeded or failed */
@@ -475,20 +401,10 @@
return false;
}
// find oldest memTable
- MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo();
+ MemTableInfo oldestMemTableInfo = checkpointManager.getOldestMemTableInfo();
if (oldestMemTableInfo == null) {
return false;
}
- if (oldestMemTableInfo.isPinned()) {
- logger.warn(
- "Pipe: Effective information ratio {} of wal node-{} is below wal min effective info ratio {}. But fail to delete memTable-{}'s wal files because they are pinned by the Pipe module. Pin count: {}.",
- effectiveInfoRatio,
- identifier,
- config.getWalMinEffectiveInfoRatio(),
- oldestMemTableInfo.getMemTableId(),
- oldestMemTableInfo.getPinCount());
- return false;
- }
IMemTable oldestMemTable = oldestMemTableInfo.getMemTable();
if (oldestMemTable == null) {
return false;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java
deleted file mode 100644
index f5d7406..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.storageengine.dataregion.wal.utils;
-
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode;
-
-import org.apache.tsfile.utils.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * This handler is used by the Pipe to find the corresponding {@link InsertNode}. Besides, it can
- * try to pin/unpin the {@link WALEntry}s by the memTable id.
- */
-public class WALEntryHandler {
-
- private static final Logger logger = LoggerFactory.getLogger(WALEntryHandler.class);
-
- private long memTableId = -1;
-
- // cached value, null after this value is flushed to wal successfully
- @SuppressWarnings("squid:S3077")
- private volatile WALEntryValue value;
-
- // wal entry's position in the wal, valid after the value is flushed to wal successfully
- // it's safe to use volatile here to make this reference thread-safe.
- @SuppressWarnings("squid:S3077")
- private final WALEntryPosition walEntryPosition = new WALEntryPosition();
-
- // wal node, null when wal is disabled
- private WALNode walNode = null;
-
- private volatile boolean isHardlink = false;
- private final AtomicReference<File> hardlinkFile = new AtomicReference<>();
-
- public WALEntryHandler(final WALEntryValue value) {
- this.value = value;
- }
-
- /**
- * Pin the wal files of the given memory table. Notice: cannot pin one memTable too long,
- * otherwise the wal disk usage may too large.
- *
- * @throws MemTablePinException If the memTable has been flushed
- */
- public void pinMemTable() throws MemTablePinException {
- if (walNode == null || memTableId < 0) {
- throw new MemTablePinException("Fail to pin memTable because of internal error.");
- }
- walNode.pinMemTable(memTableId);
- }
-
- /**
- * Unpin the wal files of the given memory table.
- *
- * @throws MemTablePinException If there aren't corresponding pin operations
- */
- public void unpinMemTable() throws MemTablePinException {
- if (walNode == null || memTableId < 0) {
- throw new MemTablePinException("Fail to pin memTable because of internal error.");
- }
- walNode.unpinMemTable(memTableId);
- }
-
- public InsertNode getInsertNodeViaCacheIfPossible() {
- try {
- final WALEntryValue finalValue = value;
- if (finalValue instanceof InsertNode) {
- return (InsertNode) finalValue;
- }
- final Pair<ByteBuffer, InsertNode> byteBufferInsertNodePair =
- walEntryPosition.getByteBufferOrInsertNodeIfPossible();
- return byteBufferInsertNodePair == null ? null : byteBufferInsertNodePair.getRight();
- } catch (final Exception e) {
- logger.warn("Fail to get insert node via cache. {}", this, e);
- throw e;
- }
- }
-
- /**
- * Get this handler's value.
- *
- * @throws WALPipeException when failing to get the value.
- */
- public InsertNode getInsertNode() throws WALPipeException {
- // return local cache
- final WALEntryValue res = value;
- if (res != null) {
- if (res instanceof InsertNode) {
- return (InsertNode) res;
- } else {
- throw new WALPipeException("Fail to get value because the entry type isn't InsertNode.");
- }
- }
-
- // wait until the position is ready
- while (!walEntryPosition.canRead()) {
- try {
- synchronized (this) {
- this.wait();
- }
- } catch (final InterruptedException e) {
- logger.warn("Interrupted when waiting for result.", e);
- Thread.currentThread().interrupt();
- }
- }
-
- final InsertNode node = isHardlink ? readFromHardlinkWALFile() : readFromOriginalWALFile();
- if (node == null) {
- throw new WALPipeException(
- String.format("Fail to get the wal value of the position %s.", walEntryPosition));
- }
- return node;
- }
-
- public ByteBuffer getByteBuffer() throws WALPipeException {
- // wait until the position is ready
- while (!walEntryPosition.canRead()) {
- try {
- synchronized (this) {
- this.wait();
- }
- } catch (InterruptedException e) {
- logger.warn("Interrupted when waiting for result.", e);
- Thread.currentThread().interrupt();
- }
- }
-
- final ByteBuffer buffer = readByteBufferFromWALFile();
- if (buffer == null) {
- throw new WALPipeException(
- String.format("Fail to get the wal value of the position %s.", walEntryPosition));
- }
- return buffer;
- }
-
- private InsertNode readFromOriginalWALFile() throws WALPipeException {
- try {
- return walEntryPosition.readInsertNodeViaCacheAfterCanRead();
- } catch (Exception e) {
- throw new WALPipeException("Fail to get value because the file content isn't correct.", e);
- }
- }
-
- private InsertNode readFromHardlinkWALFile() throws WALPipeException {
- try {
- return walEntryPosition.readInsertNodeViaCacheAfterCanRead();
- } catch (Exception e) {
- throw new WALPipeException("Fail to get value because the file content isn't correct.", e);
- }
- }
-
- private ByteBuffer readByteBufferFromWALFile() throws WALPipeException {
- try {
- return walEntryPosition.readByteBufferViaCacheAfterCanRead();
- } catch (Exception e) {
- throw new WALPipeException("Fail to get value because the file content isn't correct.", e);
- }
- }
-
- public void setWalNode(final WALNode walNode, final long memTableId) {
- this.walNode = walNode;
- this.memTableId = memTableId;
- walEntryPosition.setWalNode(walNode, memTableId);
- }
-
- public long getMemTableId() {
- return memTableId;
- }
-
- public void setEntryPosition(final long walFileVersionId, final long position) {
- this.walEntryPosition.setEntryPosition(walFileVersionId, position, value);
- this.value = null;
- synchronized (this) {
- this.notifyAll();
- }
- }
-
- public WALEntryPosition getWalEntryPosition() {
- return walEntryPosition;
- }
-
- public int getSize() {
- return walEntryPosition.getSize();
- }
-
- public void setSize(final int size) {
- this.walEntryPosition.setSize(size);
- }
-
- public void hardlinkTo(File hardlinkFile) {
- isHardlink = true;
- this.hardlinkFile.set(hardlinkFile);
- }
-
- @Override
- public String toString() {
- return "WALEntryHandler{"
- + "memTableId="
- + memTableId
- + ", value="
- + value
- + ", walEntryPosition="
- + walEntryPosition
- + '}';
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java
deleted file mode 100644
index 4d71cb1..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.storageengine.dataregion.wal.utils;
-
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue;
-import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALInputStream;
-import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode;
-
-import org.apache.tsfile.utils.Pair;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.file.StandardOpenOption;
-import java.util.Objects;
-
-/**
- * This class uses the tuple(identifier, file, position) to denote the position of the wal entry,
- * and give some methods to read the content from the disk.
- */
-public class WALEntryPosition {
- private volatile String identifier = "";
- private volatile long walFileVersionId = -1;
- private volatile long position;
- private volatile int size;
- // wal node, null when wal is disabled
- private WALNode walNode = null;
- // wal file is not null when openReadFileChannel method has been called
- private File walFile = null;
- // cache for wal entry
- private WALInsertNodeCache cache = null;
-
- private static final String ENTRY_NOT_READY_MESSAGE = "This entry isn't ready for read.";
-
- public WALEntryPosition() {}
-
- public WALEntryPosition(String identifier, long walFileVersionId, long position, int size) {
- this.identifier = identifier;
- this.walFileVersionId = walFileVersionId;
- this.position = position;
- this.size = size;
- }
-
- /**
- * Try to read the wal entry directly from the cache. No need to check if the wal entry is ready
- * for read.
- */
- public Pair<ByteBuffer, InsertNode> getByteBufferOrInsertNodeIfPossible() {
- return cache.getByteBufferOrInsertNodeIfPossible(this);
- }
-
- /**
- * Read the wal entry and parse it to the InsertNode. Use LRU cache to accelerate read.
- *
- * @throws IOException failing to read.
- */
- public InsertNode readInsertNodeViaCacheAfterCanRead() throws IOException {
- if (!canRead()) {
- throw new IOException(ENTRY_NOT_READY_MESSAGE);
- }
- return cache.getInsertNode(this);
- }
-
- /**
- * Read the wal entry and get the raw bytebuffer. Use LRU cache to accelerate read.
- *
- * @throws IOException failing to read.
- */
- public ByteBuffer readByteBufferViaCacheAfterCanRead() throws IOException {
- if (!canRead()) {
- throw new IOException(ENTRY_NOT_READY_MESSAGE);
- }
- return cache.getByteBuffer(this);
- }
-
- /**
- * Read the byte buffer directly.
- *
- * @throws IOException failing to read.
- */
- ByteBuffer read() throws IOException {
- if (!canRead()) {
- throw new IOException("Target file hasn't been specified.");
- }
- // TODO: Reuse the file stream
- try (WALInputStream is = openReadFileStream()) {
- is.skipToGivenLogicalPosition(position);
- ByteBuffer buffer = ByteBuffer.allocate(size);
- is.read(buffer);
- return buffer;
- }
- }
-
- /**
- * Open the read file channel for this wal entry, this method will retry automatically when the
- * file is sealed when opening the file channel.
- *
- * @throws IOException failing to open the file channel.
- */
- public FileChannel openReadFileChannel() throws IOException {
- if (isInSealedFile()) {
- walFile = walNode.getWALFile(walFileVersionId);
- return FileChannel.open(walFile.toPath(), StandardOpenOption.READ);
- } else {
- try {
- walFile = walNode.getWALFile(walFileVersionId);
- return FileChannel.open(walFile.toPath(), StandardOpenOption.READ);
- } catch (IOException e) {
- // unsealed file may be renamed after sealed, so we should try again
- if (isInSealedFile()) {
- walFile = walNode.getWALFile(walFileVersionId);
- return FileChannel.open(walFile.toPath(), StandardOpenOption.READ);
- } else {
- throw e;
- }
- }
- }
- }
-
- public WALInputStream openReadFileStream() throws IOException {
- // TODO: Refactor this part of code
- if (isInSealedFile()) {
- walFile = walNode.getWALFile(walFileVersionId);
- return new WALInputStream(walFile);
- } else {
- try {
- walFile = walNode.getWALFile(walFileVersionId);
- return new WALInputStream(walFile);
- } catch (IOException e) {
- // unsealed file may be renamed after sealed, so we should try again
- if (isInSealedFile()) {
- walFile = walNode.getWALFile(walFileVersionId);
- return new WALInputStream(walFile);
- } else {
- throw e;
- }
- }
- }
- }
-
- public File getWalFile() {
- return walFile;
- }
-
- /** Return true only when the tuple(file, position, size) is ready. */
- public boolean canRead() {
- return walFileVersionId >= 0;
- }
-
- /** Return true only when this wal file is sealed. */
- public boolean isInSealedFile() {
- if (walNode == null || !canRead()) {
- throw new RuntimeException(ENTRY_NOT_READY_MESSAGE);
- }
- return walFileVersionId < walNode.getCurrentWALFileVersion();
- }
-
- public void setWalNode(WALNode walNode, long memTableId) {
- this.walNode = walNode;
- identifier = walNode.getIdentifier();
- cache = WALInsertNodeCache.getInstance();
- }
-
- public String getIdentifier() {
- return identifier;
- }
-
- public void setEntryPosition(long walFileVersionId, long position, WALEntryValue value) {
- this.position = position;
- this.walFileVersionId = walFileVersionId;
- if (cache != null && value instanceof InsertNode) {
- cache.cacheInsertNodeIfNeeded(this, (InsertNode) value);
- }
- }
-
- public long getPosition() {
- return position;
- }
-
- public long getWalFileVersionId() {
- return walFileVersionId;
- }
-
- public void setSize(int size) {
- this.size = size;
- }
-
- public int getSize() {
- return size;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(identifier, walFileVersionId, position);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- WALEntryPosition that = (WALEntryPosition) o;
- return identifier.equals(that.identifier)
- && walFileVersionId == that.walFileVersionId
- && position == that.position;
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java
deleted file mode 100644
index f69dc4a..0000000
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.storageengine.dataregion.wal.utils;
-
-import org.apache.iotdb.commons.pipe.config.PipeConfig;
-import org.apache.iotdb.commons.utils.TestOnly;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager;
-import org.apache.iotdb.db.pipe.resource.memory.InsertNodeMemoryEstimator;
-import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlockType;
-import org.apache.iotdb.db.pipe.resource.memory.PipeModelFixedMemoryBlock;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryType;
-import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALByteBufReader;
-
-import com.github.benmanes.caffeine.cache.CacheLoader;
-import com.github.benmanes.caffeine.cache.Caffeine;
-import com.github.benmanes.caffeine.cache.LoadingCache;
-import com.github.benmanes.caffeine.cache.Weigher;
-import org.apache.tsfile.utils.Pair;
-import org.checkerframework.checker.nullness.qual.NonNull;
-import org.checkerframework.checker.nullness.qual.Nullable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-/** This cache is used by {@link WALEntryPosition}. */
-public class WALInsertNodeCache {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(WALInsertNodeCache.class);
- private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig();
- private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance();
-
- private static PipeModelFixedMemoryBlock walModelFixedMemory = null;
-
- // LRU cache, find Pair<ByteBuffer, InsertNode> by WALEntryPosition
- private final LoadingCache<WALEntryPosition, Pair<ByteBuffer, InsertNode>> lruCache;
-
- // ids of all pinned memTables
- private final Set<Long> memTablesNeedSearch = ConcurrentHashMap.newKeySet();
-
- private volatile boolean hasPipeRunning = false;
-
- private WALInsertNodeCache() {
- if (walModelFixedMemory == null) {
- init();
- }
-
- final long requestedAllocateSize =
- (long)
- (PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes()
- * PIPE_CONFIG.getPipeDataStructureWalMemoryProportion());
-
- lruCache =
- Caffeine.newBuilder()
- .maximumWeight(requestedAllocateSize)
- .weigher(
- (Weigher<WALEntryPosition, Pair<ByteBuffer, InsertNode>>)
- (position, pair) -> {
- long weightInLong = 0L;
- if (pair.right != null) {
- weightInLong = InsertNodeMemoryEstimator.sizeOf(pair.right);
- } else {
- weightInLong = position.getSize();
- }
- if (weightInLong <= 0) {
- return Integer.MAX_VALUE;
- }
- final int weightInInt = (int) weightInLong;
- return weightInInt != weightInLong ? Integer.MAX_VALUE : weightInInt;
- })
- .recordStats()
- .build(new WALInsertNodeCacheLoader());
- }
-
- // please call this method at PipeLauncher
- public static void init() {
- if (walModelFixedMemory != null) {
- return;
- }
- try {
- // Allocate memory for the fixed memory block of WAL
- walModelFixedMemory =
- PipeDataNodeResourceManager.memory()
- .forceAllocateForModelFixedMemoryBlock(
- (long)
- (PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes()
- * PIPE_CONFIG.getPipeDataStructureWalMemoryProportion()),
- PipeMemoryBlockType.WAL);
- } catch (Exception e) {
- LOGGER.error("Failed to initialize WAL model fixed memory block", e);
- walModelFixedMemory =
- PipeDataNodeResourceManager.memory()
- .forceAllocateForModelFixedMemoryBlock(0, PipeMemoryBlockType.WAL);
- }
- }
-
- /////////////////////////// Getter & Setter ///////////////////////////
-
- public InsertNode getInsertNode(final WALEntryPosition position) {
- final Pair<ByteBuffer, InsertNode> pair = getByteBufferOrInsertNode(position);
-
- if (pair.getRight() != null) {
- return pair.getRight();
- }
-
- if (pair.getLeft() == null) {
- throw new IllegalStateException();
- }
-
- try {
- // multi pipes may share the same wal entry, so we need to wrap the byte[] into
- // different ByteBuffer for each pipe
- final InsertNode insertNode = parse(ByteBuffer.wrap(pair.getLeft().array()));
- pair.setRight(insertNode);
- return insertNode;
- } catch (final Exception e) {
- LOGGER.error(
- "Parsing failed when recovering insertNode from wal, walFile:{}, position:{}, size:{}, exception:",
- position.getWalFile(),
- position.getPosition(),
- position.getSize(),
- e);
- throw e;
- }
- }
-
- private InsertNode parse(final ByteBuffer buffer) {
- final PlanNode node = WALEntry.deserializeForConsensus(buffer);
- if (node instanceof InsertNode) {
- return (InsertNode) node;
- } else {
- return null;
- }
- }
-
- public ByteBuffer getByteBuffer(final WALEntryPosition position) {
- Pair<ByteBuffer, InsertNode> pair = getByteBufferOrInsertNode(position);
-
- if (pair.getLeft() != null) {
- // multi pipes may share the same wal entry, so we need to wrap the byte[] into
- // different ByteBuffer for each pipe
- return ByteBuffer.wrap(pair.getLeft().array());
- }
-
- // forbid multi threads to invalidate and load the same entry
- synchronized (this) {
- lruCache.invalidate(position);
- pair = getByteBufferOrInsertNode(position);
- }
-
- if (pair.getLeft() == null) {
- throw new IllegalStateException();
- }
-
- return ByteBuffer.wrap(pair.getLeft().array());
- }
-
- public Pair<ByteBuffer, InsertNode> getByteBufferOrInsertNode(final WALEntryPosition position) {
- hasPipeRunning = true;
-
- final Pair<ByteBuffer, InsertNode> pair = lruCache.get(position);
-
- if (pair == null) {
- throw new IllegalStateException();
- }
-
- return pair;
- }
-
- public Pair<ByteBuffer, InsertNode> getByteBufferOrInsertNodeIfPossible(
- final WALEntryPosition position) {
- hasPipeRunning = true;
- return lruCache.getIfPresent(position);
- }
-
- public void cacheInsertNodeIfNeeded(
- final WALEntryPosition walEntryPosition, final InsertNode insertNode) {
- // reduce memory usage
- if (hasPipeRunning) {
- lruCache.put(walEntryPosition, new Pair<>(null, insertNode));
- }
- }
-
- //////////////////////////// APIs provided for metric framework ////////////////////////////
-
- public double getCacheHitRate() {
- return Objects.nonNull(lruCache) ? lruCache.stats().hitRate() : 0;
- }
-
- public double getCacheHitCount() {
- return Objects.nonNull(lruCache) ? lruCache.stats().hitCount() : 0;
- }
-
- public double getCacheRequestCount() {
- return Objects.nonNull(lruCache) ? lruCache.stats().requestCount() : 0;
- }
-
- /////////////////////////// MemTable ///////////////////////////
-
- public void addMemTable(final long memTableId) {
- memTablesNeedSearch.add(memTableId);
- }
-
- public void removeMemTable(final long memTableId) {
- memTablesNeedSearch.remove(memTableId);
- }
-
- /////////////////////////// Cache Loader ///////////////////////////
-
- class WALInsertNodeCacheLoader
- implements CacheLoader<WALEntryPosition, Pair<ByteBuffer, InsertNode>> {
-
- @Override
- public @Nullable Pair<ByteBuffer, InsertNode> load(@NonNull final WALEntryPosition key)
- throws Exception {
- return new Pair<>(key.read(), null);
- }
-
- /** Batch load all wal entries in the file when any one key is absent. */
- @Override
- public @NonNull Map<@NonNull WALEntryPosition, @NonNull Pair<ByteBuffer, InsertNode>> loadAll(
- @NonNull final Iterable<? extends @NonNull WALEntryPosition> walEntryPositions) {
- final Map<WALEntryPosition, Pair<ByteBuffer, InsertNode>> loadedEntries = new HashMap<>();
-
- for (final WALEntryPosition walEntryPosition : walEntryPositions) {
- if (loadedEntries.containsKey(walEntryPosition) || !walEntryPosition.canRead()) {
- continue;
- }
-
- final long walFileVersionId = walEntryPosition.getWalFileVersionId();
-
- // load one when wal file is not sealed
- if (!walEntryPosition.isInSealedFile()) {
- try {
- loadedEntries.put(walEntryPosition, load(walEntryPosition));
- } catch (final Exception e) {
- LOGGER.info(
- "Fail to cache wal entries from the wal file with version id {}",
- walFileVersionId,
- e);
- }
- continue;
- }
-
- // batch load when wal file is sealed
- long position = 0;
- try (final WALByteBufReader walByteBufReader = new WALByteBufReader(walEntryPosition)) {
- while (walByteBufReader.hasNext()) {
- // see WALInfoEntry#serialize, entry type + memtable id + plan node type
- final ByteBuffer buffer = walByteBufReader.next();
-
- final int size = buffer.capacity();
- final WALEntryType type = WALEntryType.valueOf(buffer.get());
- final long memTableId = buffer.getLong();
-
- if ((memTablesNeedSearch.contains(memTableId)
- || walEntryPosition.getPosition() == position)
- && type.needSearch()) {
- buffer.clear();
- loadedEntries.put(
- new WALEntryPosition(
- walEntryPosition.getIdentifier(), walFileVersionId, position, size),
- new Pair<>(buffer, null));
- }
-
- position += size;
- }
- } catch (final IOException e) {
- LOGGER.info(
- "Fail to cache wal entries from the wal file with version id {}",
- walFileVersionId,
- e);
- }
- }
-
- return loadedEntries;
- }
- }
-
- /////////////////////////// Singleton ///////////////////////////
-
- public static WALInsertNodeCache getInstance() {
- return InstanceHolder.INSTANCE;
- }
-
- private static class InstanceHolder {
-
- public static final WALInsertNodeCache INSTANCE = new WALInsertNodeCache();
-
- private InstanceHolder() {
- // forbidding instantiation
- }
- }
-
- /////////////////////////// Test Only ///////////////////////////
-
- @TestOnly
- boolean contains(WALEntryPosition position) {
- return lruCache.getIfPresent(position) != null;
- }
-
- @TestOnly
- public void clear() {
- lruCache.invalidateAll();
- memTablesNeedSearch.clear();
- }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java
index 8c84a0c..7896ea0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java
@@ -19,20 +19,10 @@
package org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-
/** This class helps judge whether wal is flushed to the storage device. */
public class WALFlushListener extends AbstractResultListener {
- // handler for pipeline, only exists when value is InsertNode
- private final WALEntryHandler walEntryHandler;
- public WALFlushListener(boolean wait, WALEntryValue value) {
+ public WALFlushListener(boolean wait) {
super(wait);
- walEntryHandler = new WALEntryHandler(value);
- }
-
- public WALEntryHandler getWalEntryHandler() {
- return walEntryHandler;
}
}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java
index 7dfe91a..cb147f9 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java
@@ -20,9 +20,6 @@
package org.apache.iotdb.db.storageengine.load.converter;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.commons.concurrent.IoTThreadFactory;
-import org.apache.iotdb.commons.concurrent.ThreadName;
-import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor;
import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq;
@@ -48,14 +45,6 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil.calculateTabletSizeInBytes;
@@ -70,9 +59,6 @@
.getConfig()
.getLoadTsFileTabletConversionBatchMemorySizeInBytes();
- private static final AtomicReference<WrappedThreadPoolExecutor> executorPool =
- new AtomicReference<>();
-
private final StatementExecutor statementExecutor;
@FunctionalInterface
@@ -80,21 +66,6 @@
TSStatus execute(final Statement statement);
}
- public static class CallerBlocksPolicy implements RejectedExecutionHandler {
- public CallerBlocksPolicy() {}
-
- public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
- if (!e.isShutdown()) {
- try {
- e.getQueue().put(r);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- throw new RejectedExecutionException("task " + r + " rejected from " + e, ie);
- }
- }
- }
- }
-
public LoadTreeStatementDataTypeConvertExecutionVisitor(
final StatementExecutor statementExecutor) {
this.statementExecutor = statementExecutor;
@@ -118,7 +89,6 @@
final List<Long> tabletRawReqSizes = new ArrayList<>();
try {
- final List<Future<TSStatus>> executionFutures = new ArrayList<>();
for (final File file : loadTsFileStatement.getTsFiles()) {
try (final TsFileInsertionScanDataContainer container =
new TsFileInsertionScanDataContainer(
@@ -136,16 +106,9 @@
continue;
}
- final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement();
- batchStatement.setInsertTabletStatementList(
- tabletRawReqs.stream()
- .map(
- req ->
- new LoadConvertedInsertTabletStatement(
- req.constructStatement(),
- loadTsFileStatement.isConvertOnTypeMismatch()))
- .collect(Collectors.toList()));
- executionFutures.add(executeInsertMultiTabletsWithRetry(batchStatement));
+ final TSStatus result =
+ executeInsertMultiTabletsWithRetry(
+ tabletRawReqs, loadTsFileStatement.isConvertOnTypeMismatch());
for (final long memoryCost : tabletRawReqSizes) {
block.reduceMemoryUsage(memoryCost);
@@ -153,6 +116,10 @@
tabletRawReqs.clear();
tabletRawReqSizes.clear();
+ if (!handleTSStatus(result, loadTsFileStatement)) {
+ return Optional.empty();
+ }
+
tabletRawReqs.add(tabletRawReq);
tabletRawReqSizes.add(curMemory);
block.addMemoryUsage(curMemory);
@@ -166,39 +133,25 @@
if (!tabletRawReqs.isEmpty()) {
try {
- final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement();
- batchStatement.setInsertTabletStatementList(
- tabletRawReqs.stream()
- .map(
- req ->
- new LoadConvertedInsertTabletStatement(
- req.constructStatement(),
- loadTsFileStatement.isConvertOnTypeMismatch()))
- .collect(Collectors.toList()));
- executionFutures.add(executeInsertMultiTabletsWithRetry(batchStatement));
+ final TSStatus result =
+ executeInsertMultiTabletsWithRetry(
+ tabletRawReqs, loadTsFileStatement.isConvertOnTypeMismatch());
for (final long memoryCost : tabletRawReqSizes) {
block.reduceMemoryUsage(memoryCost);
}
tabletRawReqs.clear();
tabletRawReqSizes.clear();
+
+ if (!handleTSStatus(result, loadTsFileStatement)) {
+ return Optional.empty();
+ }
} catch (final Exception e) {
LOGGER.warn(
"Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e);
return Optional.empty();
}
}
-
- for (final Future<TSStatus> future : executionFutures) {
- try {
- if (!handleTSStatus(future.get(), loadTsFileStatement)) {
- return Optional.empty();
- }
- } catch (ExecutionException | InterruptedException e) {
- LOGGER.warn("Exception occurs when executing insertion during tablet conversion: ", e);
- return Optional.empty();
- }
- }
} finally {
for (final long memoryCost : tabletRawReqSizes) {
block.reduceMemoryUsage(memoryCost);
@@ -226,67 +179,43 @@
return Optional.of(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()));
}
- private Future<TSStatus> executeInsertMultiTabletsWithRetry(
- final InsertMultiTabletsStatement batchStatement) {
- return getExecutorPool()
- .submit(
- () -> {
- TSStatus result;
- try {
- result =
- batchStatement.accept(
- LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR,
- statementExecutor.execute(batchStatement));
+ private TSStatus executeInsertMultiTabletsWithRetry(
+ final List<PipeTransferTabletRawReq> tabletRawReqs, boolean isConvertOnTypeMismatch) {
+ final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement();
+ batchStatement.setInsertTabletStatementList(
+ tabletRawReqs.stream()
+ .map(
+ req ->
+ new LoadConvertedInsertTabletStatement(
+ req.constructStatement(), isConvertOnTypeMismatch))
+ .collect(Collectors.toList()));
- // Retry max 5 times if the write process is rejected
- for (int i = 0;
- i < 5
- && result.getCode()
- == TSStatusCode.LOAD_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode();
- i++) {
- Thread.sleep(100L * (i + 1));
- result =
- batchStatement.accept(
- LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR,
- statementExecutor.execute(batchStatement));
- }
- } catch (final Exception e) {
- if (e instanceof InterruptedException) {
- Thread.currentThread().interrupt();
- }
- result =
- batchStatement.accept(
- LoadTsFileDataTypeConverter.STATEMENT_EXCEPTION_VISITOR, e);
- }
- return result;
- });
- }
+ TSStatus result;
+ try {
+ result =
+ batchStatement.accept(
+ LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR,
+ statementExecutor.execute(batchStatement));
- public static WrappedThreadPoolExecutor getExecutorPool() {
- if (executorPool.get() == null) {
- synchronized (executorPool) {
- if (executorPool.get() == null) {
- executorPool.set(
- new WrappedThreadPoolExecutor(
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getLoadTsFileTabletConversionThreadCount(),
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getLoadTsFileTabletConversionThreadCount(),
- 0L,
- TimeUnit.SECONDS,
- new ArrayBlockingQueue<>(
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getLoadTsFileTabletConversionThreadCount()),
- new IoTThreadFactory(ThreadName.LOAD_DATATYPE_CONVERT_POOL.getName()),
- ThreadName.LOAD_DATATYPE_CONVERT_POOL.getName(),
- new CallerBlocksPolicy()));
- }
+ // Retry max 5 times if the write process is rejected
+ for (int i = 0;
+ i < 5
+ && result.getCode()
+ == TSStatusCode.LOAD_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode();
+ i++) {
+ Thread.sleep(100L * (i + 1));
+ result =
+ batchStatement.accept(
+ LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR,
+ statementExecutor.execute(batchStatement));
}
+ } catch (final Exception e) {
+ if (e instanceof InterruptedException) {
+ Thread.currentThread().interrupt();
+ }
+ result = batchStatement.accept(LoadTsFileDataTypeConverter.STATEMENT_EXCEPTION_VISITOR, e);
}
- return executorPool.get();
+ return result;
}
private static boolean handleTSStatus(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java
index a46546e..ee2a8fe 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java
@@ -20,11 +20,7 @@
package org.apache.iotdb.db.storageengine.load.converter;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.db.auth.AuthorityChecker;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.protocol.session.IClientSession;
-import org.apache.iotdb.db.protocol.session.InternalClientSession;
import org.apache.iotdb.db.protocol.session.SessionManager;
import org.apache.iotdb.db.queryengine.plan.Coordinator;
import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher;
@@ -37,7 +33,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.time.ZoneId;
import java.util.Optional;
public class LoadTsFileDataTypeConverter {
@@ -73,32 +68,17 @@
}
private TSStatus executeForTreeModel(final Statement statement) {
- final IClientSession session =
- new InternalClientSession(
- String.format(
- "%s_%s",
- LoadTsFileDataTypeConverter.class.getSimpleName(),
- Thread.currentThread().getName()));
- session.setUsername(AuthorityChecker.SUPER_USER);
- session.setClientVersion(IoTDBConstant.ClientVersion.V_1_0);
- session.setZoneId(ZoneId.systemDefault());
-
- SESSION_MANAGER.registerSession(session);
- try {
- return Coordinator.getInstance()
- .executeForTreeModel(
- isGeneratedByPipe ? new PipeEnrichedStatement(statement) : statement,
- SESSION_MANAGER.requestQueryId(),
- SESSION_MANAGER.getSessionInfo(session),
- "",
- ClusterPartitionFetcher.getInstance(),
- ClusterSchemaFetcher.getInstance(),
- IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(),
- false)
- .status;
- } finally {
- SESSION_MANAGER.removeCurrSession();
- }
+ return Coordinator.getInstance()
+ .executeForTreeModel(
+ isGeneratedByPipe ? new PipeEnrichedStatement(statement) : statement,
+ SESSION_MANAGER.requestQueryId(),
+ SESSION_MANAGER.getSessionInfo(SESSION_MANAGER.getCurrSession()),
+ "",
+ ClusterPartitionFetcher.getInstance(),
+ ClusterSchemaFetcher.getInstance(),
+ IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(),
+ false)
+ .status;
}
public boolean isSuccessful(final TSStatus status) {
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java
index 8e69473..a581d11 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java
@@ -71,7 +71,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getTimestampPrecision(), deserializeReq.getTimestampPrecision());
}
@@ -94,7 +93,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getInsertNode(), deserializeReq.getInsertNode());
@@ -114,7 +112,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
}
@Test
@@ -137,7 +134,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getPlanNode(), deserializeReq.getPlanNode());
}
@@ -176,7 +172,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
final Statement statement =
req.constructStatement(); // will call PipeTransferTabletRawReq.sortTablet() here
@@ -264,8 +259,6 @@
final PipeTransferTabletBatchReq deserializedReq =
PipeTransferTabletBatchReq.fromTPipeTransferReq(req);
- Assert.assertArrayEquals(
- new byte[] {'a', 'b'}, deserializedReq.getBinaryReqs().get(0).getBody());
Assert.assertEquals(node, deserializedReq.getInsertNodeReqs().get(0).getInsertNode());
Assert.assertEquals(t, deserializedReq.getTabletReqs().get(0).getTablet());
Assert.assertFalse(deserializedReq.getTabletReqs().get(0).getIsAligned());
@@ -283,7 +276,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileName(), deserializeReq.getFileName());
Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset());
@@ -302,7 +294,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileName(), deserializeReq.getFileName());
Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset());
@@ -321,7 +312,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileName(), deserializeReq.getFileName());
Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset());
@@ -339,7 +329,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileName(), deserializeReq.getFileName());
Assert.assertEquals(req.getFileLength(), deserializeReq.getFileLength());
@@ -361,7 +350,6 @@
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion());
Assert.assertEquals(req.getType(), deserializeReq.getType());
- Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody());
Assert.assertEquals(req.getFileNames(), deserializeReq.getFileNames());
Assert.assertEquals(req.getFileLengths(), deserializeReq.getFileLengths());
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java
index d97f2da..f590389 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java
@@ -19,12 +19,10 @@
package org.apache.iotdb.db.pipe.event;
-import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
import org.apache.iotdb.commons.pipe.datastructure.pattern.PrefixPipePattern;
import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent;
-import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent;
import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer;
import org.apache.iotdb.db.pipe.event.common.tsfile.container.query.TsFileInsertionQueryDataContainer;
import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer;
@@ -524,7 +522,7 @@
}
private void testPartialNullValue(final boolean isQuery)
- throws IOException, WriteProcessException, IllegalPathException {
+ throws IOException, WriteProcessException {
alignedTsFile = new File("0-0-2-0.tsfile");
final List<MeasurementSchema> schemaList = new ArrayList<>();
@@ -558,18 +556,6 @@
final long endTime,
final boolean isQuery,
final int expectedCount) {
- PipeTsFileInsertionEvent tsFileInsertionEvent =
- new PipeTsFileInsertionEvent(
- new TsFileResource(tsFile),
- true,
- false,
- false,
- null,
- 0,
- null,
- null,
- Long.MIN_VALUE,
- Long.MAX_VALUE);
try (final TsFileInsertionDataContainer tsFileContainer =
isQuery
? new TsFileInsertionQueryDataContainer(tsFile, pattern, startTime, endTime)
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java
index 46e9579..8eb725f 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java
@@ -20,7 +20,9 @@
package org.apache.iotdb.db.pipe.extractor;
import org.apache.iotdb.commons.conf.IoTDBConstant;
+import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta;
import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant;
import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration;
import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment;
@@ -33,13 +35,13 @@
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator;
import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
import org.apache.iotdb.pipe.api.event.Event;
import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent;
import org.apache.tsfile.common.constant.TsFileConstant;
+import org.apache.tsfile.enums.TSDataType;
import org.apache.tsfile.file.metadata.PlainDeviceID;
import org.junit.After;
import org.junit.Assert;
@@ -63,8 +65,6 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
-import static org.mockito.Mockito.mock;
-
public class PipeRealtimeExtractTest {
private static final Logger LOGGER = LoggerFactory.getLogger(PipeRealtimeExtractTest.class);
@@ -106,36 +106,37 @@
public void testRealtimeExtractProcess() {
// set up realtime extractor
- try (PipeRealtimeDataRegionLogExtractor extractor0 = new PipeRealtimeDataRegionLogExtractor();
- PipeRealtimeDataRegionHybridExtractor extractor1 =
+ try (final PipeRealtimeDataRegionLogExtractor extractor0 =
+ new PipeRealtimeDataRegionLogExtractor();
+ final PipeRealtimeDataRegionHybridExtractor extractor1 =
new PipeRealtimeDataRegionHybridExtractor();
- PipeRealtimeDataRegionTsFileExtractor extractor2 =
+ final PipeRealtimeDataRegionTsFileExtractor extractor2 =
new PipeRealtimeDataRegionTsFileExtractor();
- PipeRealtimeDataRegionHybridExtractor extractor3 =
+ final PipeRealtimeDataRegionHybridExtractor extractor3 =
new PipeRealtimeDataRegionHybridExtractor()) {
- PipeParameters parameters0 =
+ final PipeParameters parameters0 =
new PipeParameters(
new HashMap<String, String>() {
{
put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern1);
}
});
- PipeParameters parameters1 =
+ final PipeParameters parameters1 =
new PipeParameters(
new HashMap<String, String>() {
{
put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern2);
}
});
- PipeParameters parameters2 =
+ final PipeParameters parameters2 =
new PipeParameters(
new HashMap<String, String>() {
{
put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern1);
}
});
- PipeParameters parameters3 =
+ final PipeParameters parameters3 =
new PipeParameters(
new HashMap<String, String>() {
{
@@ -143,18 +144,38 @@
}
});
- PipeTaskRuntimeConfiguration configuration0 =
+ final PipeTaskRuntimeConfiguration configuration0 =
new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion1), null));
- PipeTaskRuntimeConfiguration configuration1 =
+ new PipeTaskExtractorRuntimeEnvironment(
+ "1",
+ 1,
+ Integer.parseInt(dataRegion1),
+ new PipeTaskMeta(
+ MinimumProgressIndex.INSTANCE, 1, Integer.parseInt(dataRegion1), false)));
+ final PipeTaskRuntimeConfiguration configuration1 =
new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion1), null));
- PipeTaskRuntimeConfiguration configuration2 =
+ new PipeTaskExtractorRuntimeEnvironment(
+ "1",
+ 1,
+ Integer.parseInt(dataRegion1),
+ new PipeTaskMeta(
+ MinimumProgressIndex.INSTANCE, 1, Integer.parseInt(dataRegion1), false)));
+ final PipeTaskRuntimeConfiguration configuration2 =
new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion2), null));
- PipeTaskRuntimeConfiguration configuration3 =
+ new PipeTaskExtractorRuntimeEnvironment(
+ "1",
+ 1,
+ Integer.parseInt(dataRegion2),
+ new PipeTaskMeta(
+ MinimumProgressIndex.INSTANCE, 1, Integer.parseInt(dataRegion2), false)));
+ final PipeTaskRuntimeConfiguration configuration3 =
new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion2), null));
+ new PipeTaskExtractorRuntimeEnvironment(
+ "1",
+ 1,
+ Integer.parseInt(dataRegion2),
+ new PipeTaskMeta(
+ MinimumProgressIndex.INSTANCE, 1, Integer.parseInt(dataRegion2), false)));
// Some parameters of extractor are validated and initialized during the validation process.
extractor0.validate(new PipeParameterValidator(parameters0));
@@ -166,7 +187,7 @@
extractor3.validate(new PipeParameterValidator(parameters3));
extractor3.customize(parameters3, configuration3);
- PipeRealtimeDataRegionExtractor[] extractors =
+ final PipeRealtimeDataRegionExtractor[] extractors =
new PipeRealtimeDataRegionExtractor[] {extractor0, extractor1, extractor2, extractor3};
// start extractor 0, 1
@@ -174,7 +195,7 @@
extractors[1].start();
// test result of extractor 0, 1
- int writeNum = 10;
+ final int writeNum = 10;
List<Future<?>> writeFutures =
Arrays.asList(
write2DataRegion(writeNum, dataRegion1, 0),
@@ -192,7 +213,7 @@
try {
listenFutures.get(0).get(10, TimeUnit.MINUTES);
listenFutures.get(1).get(10, TimeUnit.MINUTES);
- } catch (TimeoutException e) {
+ } catch (final TimeoutException e) {
LOGGER.warn("Time out when listening extractor", e);
alive.set(false);
Assert.fail();
@@ -234,7 +255,7 @@
listenFutures.get(1).get(10, TimeUnit.MINUTES);
listenFutures.get(2).get(10, TimeUnit.MINUTES);
listenFutures.get(3).get(10, TimeUnit.MINUTES);
- } catch (TimeoutException e) {
+ } catch (final TimeoutException e) {
LOGGER.warn("Time out when listening extractor", e);
alive.set(false);
Assert.fail();
@@ -247,34 +268,34 @@
throw new RuntimeException(e);
}
});
- } catch (Exception e) {
+ } catch (final Exception e) {
throw new RuntimeException(e);
}
}
- private Future<?> write2DataRegion(int writeNum, String dataRegionId, int startNum) {
-
- File dataRegionDir =
+ private Future<?> write2DataRegion(
+ final int writeNum, final String dataRegionId, final int startNum) {
+ final File dataRegionDir =
new File(tsFileDir.getPath() + File.separator + dataRegionId + File.separator + "0");
- boolean ignored = dataRegionDir.mkdirs();
+ final boolean ignored = dataRegionDir.mkdirs();
return writeService.submit(
() -> {
for (int i = startNum; i < startNum + writeNum; ++i) {
- File tsFile = new File(dataRegionDir, String.format("%s-%s-0-0.tsfile", i, i));
+ final File tsFile = new File(dataRegionDir, String.format("%s-%s-0-0.tsfile", i, i));
try {
- boolean ignored1 = tsFile.createNewFile();
- } catch (IOException e) {
+ final boolean ignored1 = tsFile.createNewFile();
+ } catch (final IOException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
- TsFileResource resource = new TsFileResource(tsFile);
+ final TsFileResource resource = new TsFileResource(tsFile);
resource.updateStartTime(
new PlainDeviceID(String.join(TsFileConstant.PATH_SEPARATOR, device)), 0);
try {
resource.close();
- } catch (IOException e) {
+ } catch (final IOException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
@@ -282,29 +303,27 @@
PipeInsertionDataNodeListener.getInstance()
.listenToInsertNode(
dataRegionId,
- mock(WALEntryHandler.class),
new InsertRowNode(
new PlanNodeId(String.valueOf(i)),
new PartialPath(device),
false,
new String[] {"a"},
- null,
+ new TSDataType[] {TSDataType.INT32},
0,
- null,
+ new Integer[] {1},
false),
resource);
PipeInsertionDataNodeListener.getInstance()
.listenToInsertNode(
dataRegionId,
- mock(WALEntryHandler.class),
new InsertRowNode(
new PlanNodeId(String.valueOf(i)),
new PartialPath(device),
false,
new String[] {"b"},
- null,
+ new TSDataType[] {TSDataType.INT32},
0,
- null,
+ new Integer[] {1},
false),
resource);
PipeInsertionDataNodeListener.getInstance()
@@ -314,7 +333,9 @@
}
private Future<?> listen(
- PipeRealtimeDataRegionExtractor extractor, Function<Event, Integer> weight, int expectNum) {
+ final PipeRealtimeDataRegionExtractor extractor,
+ final Function<Event, Integer> weight,
+ final int expectNum) {
return listenerService.submit(
() -> {
int eventNum = 0;
@@ -323,7 +344,7 @@
Event event;
try {
event = extractor.supply();
- } catch (Exception e) {
+ } catch (final Exception e) {
throw new RuntimeException(e);
}
if (event != null) {
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcherTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcherTest.java
deleted file mode 100644
index 2e3e57c..0000000
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/pattern/CachedSchemaPatternMatcherTest.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.pattern;
-
-import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant;
-import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration;
-import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment;
-import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern;
-import org.apache.iotdb.commons.pipe.datastructure.pattern.PrefixPipePattern;
-import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
-import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch;
-import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher.CachedSchemaPatternMatcher;
-import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters;
-import org.apache.iotdb.pipe.api.event.Event;
-
-import org.apache.tsfile.common.constant.TsFileConstant;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-public class CachedSchemaPatternMatcherTest {
-
- private static class MockedPipeRealtimeEvent extends PipeRealtimeEvent {
-
- public MockedPipeRealtimeEvent(
- EnrichedEvent event,
- TsFileEpoch tsFileEpoch,
- Map<String, String[]> device2Measurements,
- PipePattern pattern) {
- super(event, tsFileEpoch, device2Measurements, pattern);
- }
-
- @Override
- public boolean shouldParseTime() {
- return false;
- }
-
- @Override
- public boolean shouldParsePattern() {
- return false;
- }
- }
-
- private CachedSchemaPatternMatcher matcher;
- private ExecutorService executorService;
- private List<PipeRealtimeDataRegionExtractor> extractors;
-
- @Before
- public void setUp() {
- matcher = new CachedSchemaPatternMatcher();
- executorService = Executors.newSingleThreadExecutor();
- extractors = new ArrayList<>();
- }
-
- @After
- public void tearDown() {
- executorService.shutdownNow();
- }
-
- @Test
- public void testCachedMatcher() throws Exception {
- PipeRealtimeDataRegionExtractor dataRegionExtractor = new PipeRealtimeDataRegionFakeExtractor();
- dataRegionExtractor.customize(
- new PipeParameters(
- new HashMap<String, String>() {
- {
- put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, "root");
- }
- }),
- new PipeTaskRuntimeConfiguration(new PipeTaskExtractorRuntimeEnvironment("1", 1, 1, null)));
- extractors.add(dataRegionExtractor);
-
- int deviceExtractorNum = 10;
- int seriesExtractorNum = 10;
- for (int i = 0; i < deviceExtractorNum; i++) {
- PipeRealtimeDataRegionExtractor deviceExtractor = new PipeRealtimeDataRegionFakeExtractor();
- int finalI1 = i;
- deviceExtractor.customize(
- new PipeParameters(
- new HashMap<String, String>() {
- {
- put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, "root." + finalI1);
- }
- }),
- new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, 1, null)));
- extractors.add(deviceExtractor);
- for (int j = 0; j < seriesExtractorNum; j++) {
- PipeRealtimeDataRegionExtractor seriesExtractor = new PipeRealtimeDataRegionFakeExtractor();
- int finalI = i;
- int finalJ = j;
- seriesExtractor.customize(
- new PipeParameters(
- new HashMap<String, String>() {
- {
- put(
- PipeExtractorConstant.EXTRACTOR_PATTERN_KEY,
- "root." + finalI + "." + finalJ);
- }
- }),
- new PipeTaskRuntimeConfiguration(
- new PipeTaskExtractorRuntimeEnvironment("1", 1, 1, null)));
- extractors.add(seriesExtractor);
- }
- }
-
- Future<?> future =
- executorService.submit(() -> extractors.forEach(extractor -> matcher.register(extractor)));
-
- int epochNum = 10000;
- int deviceNum = 1000;
- int seriesNum = 100;
- Map<String, String[]> deviceMap =
- IntStream.range(0, deviceNum)
- .mapToObj(String::valueOf)
- .collect(Collectors.toMap(s -> "root." + s, s -> new String[0]));
- String[] measurements =
- IntStream.range(0, seriesNum).mapToObj(String::valueOf).toArray(String[]::new);
- long totalTime = 0;
- for (int i = 0; i < epochNum; i++) {
- for (int j = 0; j < deviceNum; j++) {
- MockedPipeRealtimeEvent event =
- new MockedPipeRealtimeEvent(
- null, null, Collections.singletonMap("root." + i, measurements), null);
- long startTime = System.currentTimeMillis();
- matcher.match(event).forEach(extractor -> extractor.extract(event));
- totalTime += (System.currentTimeMillis() - startTime);
- }
- MockedPipeRealtimeEvent event = new MockedPipeRealtimeEvent(null, null, deviceMap, null);
- long startTime = System.currentTimeMillis();
- matcher.match(event).forEach(extractor -> extractor.extract(event));
- totalTime += (System.currentTimeMillis() - startTime);
- }
- System.out.println("matcher.getRegisterCount() = " + matcher.getRegisterCount());
- System.out.println("totalTime = " + totalTime);
- System.out.println(
- "device match per second = "
- + ((double) (epochNum * (deviceNum + 1)) / (double) (totalTime) * 1000.0));
-
- future.get();
- }
-
- public static class PipeRealtimeDataRegionFakeExtractor extends PipeRealtimeDataRegionExtractor {
-
- public PipeRealtimeDataRegionFakeExtractor() {
- pipePattern = new PrefixPipePattern(null);
- }
-
- @Override
- public Event supply() {
- return null;
- }
-
- @Override
- protected void doExtract(PipeRealtimeEvent event) {
- final boolean[] match = {false};
- event
- .getSchemaInfo()
- .forEach(
- (k, v) -> {
- if (v.length > 0) {
- for (String s : v) {
- match[0] =
- match[0]
- || (k + TsFileConstant.PATH_SEPARATOR + s)
- .startsWith(getPatternString());
- }
- } else {
- match[0] =
- match[0]
- || (getPatternString().startsWith(k) || k.startsWith(getPatternString()));
- }
- });
- Assert.assertTrue(match[0]);
- }
-
- @Override
- public boolean isNeedListenToTsFile() {
- return true;
- }
-
- @Override
- public boolean isNeedListenToInsertNode() {
- return true;
- }
- }
-}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java
index 0281bb0..5c5dc84 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java
@@ -24,7 +24,6 @@
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.utils.FileUtils;
import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent;
-import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResource;
import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager;
import org.apache.iotdb.db.storageengine.dataregion.modification.Deletion;
import org.apache.iotdb.db.storageengine.dataregion.modification.Modification;
@@ -47,9 +46,7 @@
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
-import java.util.concurrent.TimeUnit;
-import static org.awaitility.Awaitility.await;
import static org.junit.Assert.fail;
public class PipeTsFileResourceManagerTest {
@@ -59,6 +56,7 @@
ROOT_DIR + File.separator + IoTDBConstant.SEQUENCE_FOLDER_NAME;
private static final String TS_FILE_NAME = SEQUENCE_DIR + File.separator + "test.tsfile";
private static final String MODS_FILE_NAME = TS_FILE_NAME + ".mods";
+ private static final String PIPE_NAME = "pipe";
private PipeTsFileResourceManager pipeTsFileResourceManager;
@@ -156,53 +154,55 @@
}
@Test
- public void testIncreaseTsfile() throws IOException {
- File originTsfile = new File(TS_FILE_NAME);
- File originModFile = new File(MODS_FILE_NAME);
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originTsfile));
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile));
+ public void testIncreaseTsFile() throws IOException {
+ final File originTsfile = new File(TS_FILE_NAME);
+ final File originModFile = new File(MODS_FILE_NAME);
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originTsfile, null));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile, null));
- File pipeTsfile = pipeTsFileResourceManager.increaseFileReference(originTsfile, true, null);
- File pipeModFile = pipeTsFileResourceManager.increaseFileReference(originModFile, false, null);
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
+ final File pipeTsfile =
+ pipeTsFileResourceManager.increaseFileReference(originTsfile, true, PIPE_NAME);
+ final File pipeModFile =
+ pipeTsFileResourceManager.increaseFileReference(originModFile, false, PIPE_NAME);
+ Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null));
Assert.assertTrue(Files.exists(originTsfile.toPath()));
Assert.assertTrue(Files.exists(originModFile.toPath()));
Assert.assertTrue(Files.exists(pipeTsfile.toPath()));
Assert.assertTrue(Files.exists(pipeModFile.toPath()));
- pipeTsFileResourceManager.increaseFileReference(originTsfile, true, null);
- pipeTsFileResourceManager.increaseFileReference(originModFile, false, null);
- Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
- Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
-
- // test use hardlinkTsFile to increase reference counts
- pipeTsFileResourceManager.increaseFileReference(pipeTsfile, true, null);
- Assert.assertEquals(3, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
+ // test use assigner's hardlinkTsFile to increase reference counts
+ // test null, shall not reuse the pipe's tsFile
+ pipeTsFileResourceManager.increaseFileReference(pipeTsfile, true, PIPE_NAME);
+ Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null));
+ Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME));
Assert.assertTrue(Files.exists(originTsfile.toPath()));
Assert.assertTrue(Files.exists(pipeTsfile.toPath()));
// test use copyFile to increase reference counts
- pipeTsFileResourceManager.increaseFileReference(pipeModFile, false, null);
- Assert.assertEquals(3, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
+ pipeTsFileResourceManager.increaseFileReference(pipeModFile, false, PIPE_NAME);
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null));
+ Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME));
Assert.assertTrue(Files.exists(originModFile.toPath()));
Assert.assertTrue(Files.exists(pipeModFile.toPath()));
}
@Test
- public void testDecreaseTsfile() throws IOException {
- File originFile = new File(TS_FILE_NAME);
- File originModFile = new File(MODS_FILE_NAME);
+ public void testDecreaseTsFile() throws IOException {
+ final File originFile = new File(TS_FILE_NAME);
+ final File originModFile = new File(MODS_FILE_NAME);
- pipeTsFileResourceManager.decreaseFileReference(originFile);
- pipeTsFileResourceManager.decreaseFileReference(originModFile);
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originFile));
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile));
+ pipeTsFileResourceManager.decreaseFileReference(originFile, PIPE_NAME);
+ pipeTsFileResourceManager.decreaseFileReference(originModFile, PIPE_NAME);
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originFile, null));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile, null));
- File pipeTsfile = pipeTsFileResourceManager.increaseFileReference(originFile, true, null);
- File pipeModFile = pipeTsFileResourceManager.increaseFileReference(originModFile, false, null);
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
+ final File pipeTsfile =
+ pipeTsFileResourceManager.increaseFileReference(originFile, true, PIPE_NAME);
+ final File pipeModFile =
+ pipeTsFileResourceManager.increaseFileReference(originModFile, false, PIPE_NAME);
+ Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null));
Assert.assertTrue(Files.exists(pipeTsfile.toPath()));
Assert.assertTrue(Files.exists(pipeModFile.toPath()));
Assert.assertTrue(Files.exists(pipeTsfile.toPath()));
@@ -213,26 +213,22 @@
Assert.assertFalse(Files.exists(originFile.toPath()));
Assert.assertFalse(Files.exists(originModFile.toPath()));
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
- Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
+ Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME));
+ Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null));
+ Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null));
Assert.assertFalse(Files.exists(originFile.toPath()));
Assert.assertFalse(Files.exists(originModFile.toPath()));
Assert.assertTrue(Files.exists(pipeTsfile.toPath()));
Assert.assertTrue(Files.exists(pipeModFile.toPath()));
- pipeTsFileResourceManager.decreaseFileReference(pipeTsfile);
- pipeTsFileResourceManager.decreaseFileReference(pipeModFile);
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile));
- Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile));
+ pipeTsFileResourceManager.decreaseFileReference(pipeTsfile, PIPE_NAME);
+ pipeTsFileResourceManager.decreaseFileReference(pipeModFile, PIPE_NAME);
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME));
+ Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null));
Assert.assertFalse(Files.exists(originFile.toPath()));
Assert.assertFalse(Files.exists(originModFile.toPath()));
- // Pipe TsFile will be cleaned by a timed thread, so we wait some time here.
- await()
- .atMost(3 * PipeTsFileResource.TSFILE_MIN_TIME_TO_LIVE_IN_MS, TimeUnit.MILLISECONDS)
- .untilAsserted(
- () -> {
- Assert.assertFalse(Files.exists(pipeTsfile.toPath()));
- Assert.assertFalse(Files.exists(pipeModFile.toPath()));
- });
}
}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java
deleted file mode 100644
index 33e47af..0000000
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.pipe.resource;
-
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.commons.utils.FileUtils;
-import org.apache.iotdb.db.pipe.resource.wal.hardlink.PipeWALHardlinkResourceManager;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-
-public class PipeWALHardlinkResourceManagerTest {
- private static final String ROOT_DIR = "target" + File.separator + "PipeWALHolderTest";
-
- private static final String WAL_DIR = ROOT_DIR + File.separator + IoTDBConstant.WAL_FOLDER_NAME;
-
- private static final String WAL_NAME = WAL_DIR + File.separator + "test.wal";
-
- private PipeWALHardlinkResourceManager pipeWALHardlinkResourceManager;
-
- @Before
- public void setUp() throws Exception {
- pipeWALHardlinkResourceManager = new PipeWALHardlinkResourceManager();
-
- createWAL();
- }
-
- private void createWAL() {
- File file = new File(WAL_NAME);
- if (file.exists()) {
- boolean ignored = file.delete();
- }
-
- try {
- file.getParentFile().mkdirs();
- file.createNewFile();
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- @After
- public void tearDown() throws Exception {
- File pipeFolder = new File(ROOT_DIR);
- if (pipeFolder.exists()) {
- FileUtils.deleteFileOrDirectory(pipeFolder);
- }
- }
-
- @Test
- public void testIncreaseTsfile() throws IOException {
- File originWALFile = new File(WAL_NAME);
- Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(originWALFile));
-
- File pipeWALFile = pipeWALHardlinkResourceManager.increaseFileReference(originWALFile);
- Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile));
- Assert.assertTrue(Files.exists(originWALFile.toPath()));
- Assert.assertTrue(Files.exists(pipeWALFile.toPath()));
-
- // test use hardlinkTsFile to increase reference counts
- pipeWALHardlinkResourceManager.increaseFileReference(pipeWALFile);
- Assert.assertEquals(2, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile));
- Assert.assertTrue(Files.exists(originWALFile.toPath()));
- Assert.assertTrue(Files.exists(pipeWALFile.toPath()));
- }
-
- @Test
- public void testDecreaseTsfile() throws IOException {
- File originFile = new File(WAL_NAME);
-
- pipeWALHardlinkResourceManager.decreaseFileReference(originFile);
- Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(originFile));
-
- File pipeWALFile = pipeWALHardlinkResourceManager.increaseFileReference(originFile);
- Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile));
- Assert.assertTrue(Files.exists(pipeWALFile.toPath()));
- Assert.assertTrue(Files.exists(pipeWALFile.toPath()));
-
- Assert.assertTrue(originFile.delete());
- Assert.assertFalse(Files.exists(originFile.toPath()));
-
- Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile));
- Assert.assertFalse(Files.exists(originFile.toPath()));
- Assert.assertTrue(Files.exists(pipeWALFile.toPath()));
-
- pipeWALHardlinkResourceManager.decreaseFileReference(pipeWALFile);
- Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile));
- Assert.assertFalse(Files.exists(originFile.toPath()));
- Assert.assertFalse(Files.exists(pipeWALFile.toPath()));
- }
-}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java
index 33bf801..87b2588 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java
@@ -128,30 +128,23 @@
Assert.assertTrue(
hybridProgressIndex.isAfter(new RecoverProgressIndex(3, new SimpleProgressIndex(5, 4))));
- Assert.assertTrue(
- new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
+ Assert.assertTrue(new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndex()));
indexList.forEach(tsFileResource::updateProgressIndex);
+ Assert.assertFalse(new MockProgressIndex(-1).isAfter(tsFileResource.getMaxProgressIndex()));
+ Assert.assertFalse(new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndex()));
+ Assert.assertFalse(new MockProgressIndex(1).isAfter(tsFileResource.getMaxProgressIndex()));
Assert.assertFalse(
- new MockProgressIndex(-1).isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
- Assert.assertFalse(
- new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
- Assert.assertFalse(
- new MockProgressIndex(1).isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
- Assert.assertFalse(
- new MockProgressIndex(INDEX_NUM - 1)
- .isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
+ new MockProgressIndex(INDEX_NUM - 1).isAfter(tsFileResource.getMaxProgressIndex()));
Assert.assertTrue(
- new MockProgressIndex(INDEX_NUM).isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
+ new MockProgressIndex(INDEX_NUM).isAfter(tsFileResource.getMaxProgressIndex()));
Assert.assertTrue(
- new MockProgressIndex(Integer.MAX_VALUE)
- .isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
+ new MockProgressIndex(Integer.MAX_VALUE).isAfter(tsFileResource.getMaxProgressIndex()));
Assert.assertFalse(
- new MockProgressIndex(1, INDEX_NUM - 1)
- .isAfter(tsFileResource.getMaxProgressIndexAfterClose()));
+ new MockProgressIndex(1, INDEX_NUM - 1).isAfter(tsFileResource.getMaxProgressIndex()));
}
@Test
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java
deleted file mode 100644
index 9c9290f..0000000
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint;
-
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.wal.io.CheckpointReader;
-import org.apache.iotdb.db.storageengine.dataregion.wal.recover.CheckpointRecoverUtils;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.CheckpointFileUtils;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.db.utils.constant.TestConstant;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class CheckpointManagerTest {
- private static final String database = "root.test";
- private static final String dataRegionId = "1";
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private static final String identifier = String.valueOf(Integer.MAX_VALUE);
- private static final String logDirectory = TestConstant.BASE_OUTPUT_PATH.concat("wal-test");
- private CheckpointManager checkpointManager;
- private long prevFileSize;
-
- @Before
- public void setUp() throws Exception {
- EnvironmentUtils.cleanDir(logDirectory);
- prevFileSize = config.getCheckpointFileSizeThresholdInByte();
- config.setCheckpointFileSizeThresholdInByte(10 * 1024);
- checkpointManager = new CheckpointManager(identifier, logDirectory);
- }
-
- @After
- public void tearDown() throws Exception {
- checkpointManager.close();
- config.setCheckpointFileSizeThresholdInByte(prevFileSize);
- EnvironmentUtils.cleanDir(logDirectory);
- }
-
- @Test
- public void testNewFile() {
- Checkpoint initCheckpoint =
- new Checkpoint(CheckpointType.GLOBAL_MEMORY_TABLE_INFO, Collections.emptyList());
- List<Checkpoint> expectedCheckpoints = Collections.singletonList(initCheckpoint);
- CheckpointReader checkpointReader =
- new CheckpointReader(
- new File(logDirectory + File.separator + CheckpointFileUtils.getLogFileName(0)));
- List<Checkpoint> actualCheckpoints = checkpointReader.getCheckpoints();
- assertEquals(expectedCheckpoints, actualCheckpoints);
- }
-
- @Test
- public void testConcurrentWrite() throws Exception {
- // start write threads to write concurrently
- int threadsNum = 5;
- ExecutorService executorService = Executors.newFixedThreadPool(threadsNum);
- List<Future<Void>> futures = new ArrayList<>();
- Map<Long, MemTableInfo> expectedMemTableId2Info = new ConcurrentHashMap<>();
- Map<Long, Long> versionId2memTableId = new ConcurrentHashMap<>();
- // create 10 memTables, and flush the first 5 of them
- int memTablesNum = 10;
- for (int i = 0; i < memTablesNum; ++i) {
- long versionId = i;
- Callable<Void> writeTask =
- () -> {
- String tsFilePath = logDirectory + File.separator + versionId + ".tsfile";
- MemTableInfo memTableInfo =
- new MemTableInfo(
- new PrimitiveMemTable(database, dataRegionId), tsFilePath, versionId);
- versionId2memTableId.put(versionId, memTableInfo.getMemTableId());
- checkpointManager.makeCreateMemTableCPInMemory(memTableInfo);
- checkpointManager.makeCreateMemTableCPOnDisk(memTableInfo.getMemTableId());
- if (versionId < memTablesNum / 2) {
- checkpointManager.makeFlushMemTableCP(versionId2memTableId.get(versionId));
- } else {
- expectedMemTableId2Info.put(memTableInfo.getMemTableId(), memTableInfo);
- }
- return null;
- };
- Future<Void> future = executorService.submit(writeTask);
- futures.add(future);
- }
- // wait until all write tasks are done
- for (Future<Void> future : futures) {
- future.get();
- }
- executorService.shutdown();
- // check first valid version id
- assertEquals(memTablesNum / 2, checkpointManager.getFirstValidWALVersionId());
- // recover info from checkpoint file
- Map<Long, MemTableInfo> actualMemTableId2Info =
- CheckpointRecoverUtils.recoverMemTableInfo(new File(logDirectory)).getMemTableId2Info();
- assertEquals(expectedMemTableId2Info, actualMemTableId2Info);
- }
-
- @Test
- public void testTriggerLogRoller() {
- // create memTables until reach LOG_SIZE_LIMIT, and flush the first 5 of them
- int size = 0;
- long versionId = 0;
- Map<Long, MemTableInfo> expectedMemTableId2Info = new HashMap<>();
- Map<Long, Long> versionId2memTableId = new HashMap<>();
- while (size < config.getCheckpointFileSizeThresholdInByte()) {
- ++versionId;
- String tsFilePath = logDirectory + File.separator + versionId + ".tsfile";
- MemTableInfo memTableInfo =
- new MemTableInfo(new PrimitiveMemTable(database, dataRegionId), tsFilePath, versionId);
- versionId2memTableId.put(versionId, memTableInfo.getMemTableId());
- Checkpoint checkpoint =
- new Checkpoint(
- CheckpointType.CREATE_MEMORY_TABLE, Collections.singletonList(memTableInfo));
- size += checkpoint.serializedSize();
- checkpointManager.makeCreateMemTableCPInMemory(memTableInfo);
- checkpointManager.makeCreateMemTableCPOnDisk(memTableInfo.getMemTableId());
- if (versionId < 5) {
- checkpoint =
- new Checkpoint(
- CheckpointType.FLUSH_MEMORY_TABLE, Collections.singletonList(memTableInfo));
- size += checkpoint.serializedSize();
- checkpointManager.makeFlushMemTableCP(versionId2memTableId.get(versionId));
- } else {
- expectedMemTableId2Info.put(memTableInfo.getMemTableId(), memTableInfo);
- }
- }
- checkpointManager.fsyncCheckpointFile();
- // check first valid version id
- assertEquals(5, checkpointManager.getFirstValidWALVersionId());
- // check checkpoint files
- assertFalse(
- new File(logDirectory + File.separator + CheckpointFileUtils.getLogFileName(0)).exists());
- assertTrue(
- new File(logDirectory + File.separator + CheckpointFileUtils.getLogFileName(1)).exists());
- // recover info from checkpoint file
- Map<Long, MemTableInfo> actualMemTableId2Info =
- CheckpointRecoverUtils.recoverMemTableInfo(new File(logDirectory)).getMemTableId2Info();
- assertEquals(expectedMemTableId2Info, actualMemTableId2Info);
- }
-}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java
deleted file mode 100644
index d5913e5..0000000
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.storageengine.dataregion.wal.node;
-
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.consensus.iot.log.ConsensusReqReader;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode;
-import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry;
-import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointManager;
-import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.MemTableInfo;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.db.utils.constant.TestConstant;
-
-import org.apache.tsfile.common.conf.TSFileConfig;
-import org.apache.tsfile.enums.TSDataType;
-import org.apache.tsfile.utils.Binary;
-import org.apache.tsfile.write.schema.MeasurementSchema;
-import org.awaitility.Awaitility;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-public class WALEntryHandlerTest {
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private static final String identifier1 = String.valueOf(Integer.MAX_VALUE);
- private static final String identifier2 = String.valueOf(Integer.MAX_VALUE - 1);
- private static final String logDirectory1 =
- TestConstant.BASE_OUTPUT_PATH.concat("wal-test" + identifier1);
- private static final String logDirectory2 =
- TestConstant.BASE_OUTPUT_PATH.concat("wal-test" + identifier2);
-
- private static final String databasePath = "root.test_sg";
- private static final String devicePath = databasePath + ".test_d";
- private static final String dataRegionId = "1";
- private WALMode prevMode;
- private WALNode walNode1;
- private WALNode walNode2;
-
- @Before
- public void setUp() throws Exception {
- EnvironmentUtils.cleanDir(logDirectory1);
- EnvironmentUtils.cleanDir(logDirectory2);
- prevMode = config.getWalMode();
- config.setWalMode(WALMode.SYNC);
- walNode1 = new WALNode(identifier1, logDirectory1);
- walNode2 = new WALNode(identifier2, logDirectory2);
- }
-
- @After
- public void tearDown() throws Exception {
- walNode1.close();
- walNode2.close();
- config.setWalMode(prevMode);
- EnvironmentUtils.cleanDir(logDirectory1);
- EnvironmentUtils.cleanDir(logDirectory2);
- WALInsertNodeCache.getInstance().clear();
- }
-
- @Test(expected = MemTablePinException.class)
- public void pinDeletedMemTable1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowNode(devicePath, System.currentTimeMillis()));
- walNode1.onMemTableFlushed(memTable);
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- // pin flushed memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- }
-
- @Test(expected = MemTablePinException.class)
- public void pinDeletedMemTable2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis()));
- walNode1.onMemTableFlushed(memTable);
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- // pin flushed memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- }
-
- @Test
- public void pinMemTable1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- // roll wal file
- walNode1.rollWALFile();
- InsertRowNode node2 = getInsertRowNode(devicePath, System.currentTimeMillis());
- node2.setSearchIndex(2);
- walNode1.log(memTable.getMemTableId(), node2);
- walNode1.onMemTableFlushed(memTable);
- walNode1.rollWALFile();
- // find node1
- ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- // try to delete flushed but pinned memTable
- walNode1.deleteOutdatedFiles();
- // try to find node1
- itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- }
-
- @Test
- public void pinMemTable2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- // roll wal file
- walNode1.rollWALFile();
- InsertRowsNode node2 = getInsertRowsNode(devicePath, System.currentTimeMillis());
- node2.setSearchIndex(2);
- walNode1.log(memTable.getMemTableId(), node2);
- walNode1.onMemTableFlushed(memTable);
- walNode1.rollWALFile();
- // find node1
- ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- // try to delete flushed but pinned memTable
- walNode1.deleteOutdatedFiles();
- // try to find node1
- itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- }
-
- @Test(expected = MemTablePinException.class)
- public void unpinDeletedMemTable1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowNode(devicePath, System.currentTimeMillis()));
- walNode1.onMemTableFlushed(memTable);
- // pin flushed memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.unpinMemTable();
- }
-
- @Test(expected = MemTablePinException.class)
- public void unpinDeletedMemTable2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis()));
- walNode1.onMemTableFlushed(memTable);
- // pin flushed memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.unpinMemTable();
- }
-
- @Test
- public void unpinFlushedMemTable1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowNode(devicePath, System.currentTimeMillis()));
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- // pin twice
- handler.pinMemTable();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- // unpin 1
- CheckpointManager checkpointManager = walNode1.getCheckpointManager();
- handler.unpinMemTable();
- MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo();
- assertNull(oldestMemTableInfo);
- // unpin 2
- handler.unpinMemTable();
- assertNull(checkpointManager.getOldestUnpinnedMemTableInfo());
- }
-
- @Test
- public void unpinFlushedMemTable2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener flushListener =
- walNode1.log(
- memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis()));
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- // pin twice
- handler.pinMemTable();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- // unpin 1
- CheckpointManager checkpointManager = walNode1.getCheckpointManager();
- handler.unpinMemTable();
- MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo();
- assertNull(oldestMemTableInfo);
- // unpin 2
- handler.unpinMemTable();
- assertNull(checkpointManager.getOldestUnpinnedMemTableInfo());
- }
-
- @Test
- public void unpinMemTable1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- // roll wal file
- walNode1.rollWALFile();
- walNode1.rollWALFile();
- // find node1
- ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- // unpin flushed memTable
- handler.unpinMemTable();
- // try to delete flushed but pinned memTable
- walNode1.deleteOutdatedFiles();
- // try to find node1
- itr = walNode1.getReqIterator(1);
- assertFalse(itr.hasNext());
- }
-
- @Test
- public void unpinMemTable2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- // roll wal file
- walNode1.rollWALFile();
- walNode1.rollWALFile();
- // find node1
- ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1);
- assertTrue(itr.hasNext());
- assertEquals(
- node1,
- WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer()));
- // unpin flushed memTable
- handler.unpinMemTable();
- // try to delete flushed but pinned memTable
- walNode1.deleteOutdatedFiles();
- // try to find node1
- itr = walNode1.getReqIterator(1);
- assertFalse(itr.hasNext());
- }
-
- @Test
- public void getUnFlushedValue1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- assertEquals(node1, handler.getInsertNode());
- }
-
- @Test
- public void getUnFlushedValue2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- assertEquals(node1, handler.getInsertNode());
- }
-
- @Test
- public void getFlushedValue1() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- // wait until wal flushed
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- assertEquals(node1, handler.getInsertNode());
- }
-
- @Test
- public void getFlushedValue2() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile");
- InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1);
- // pin memTable
- WALEntryHandler handler = flushListener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.onMemTableFlushed(memTable);
- // wait until wal flushed
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
- assertEquals(node1, handler.getInsertNode());
- }
-
- @Test
- public void testConcurrentGetValue1() throws Exception {
- int threadsNum = 10;
- ExecutorService executorService = Executors.newFixedThreadPool(threadsNum);
- List<Future<Void>> futures = new ArrayList<>();
- for (int i = 0; i < threadsNum; ++i) {
- WALNode walNode = i % 2 == 0 ? walNode1 : walNode2;
- String logDirectory = i % 2 == 0 ? logDirectory1 : logDirectory2;
- Callable<Void> writeTask =
- () -> {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile");
-
- List<WALFlushListener> walFlushListeners = new ArrayList<>();
- List<InsertRowNode> expectedInsertRowNodes = new ArrayList<>();
- try {
- for (int j = 0; j < 1_000; ++j) {
- long memTableId = memTable.getMemTableId();
- InsertRowNode node =
- getInsertRowNode(devicePath + memTableId, System.currentTimeMillis());
- expectedInsertRowNodes.add(node);
- WALFlushListener walFlushListener = walNode.log(memTableId, node);
- walFlushListeners.add(walFlushListener);
- }
- } catch (IllegalPathException e) {
- fail();
- }
-
- // wait until wal flushed
- Awaitility.await().until(walNode::isAllWALEntriesConsumed);
-
- walFlushListeners.get(0).getWalEntryHandler().pinMemTable();
- walNode.onMemTableFlushed(memTable);
-
- for (int j = 0; j < expectedInsertRowNodes.size(); ++j) {
- InsertRowNode expect = expectedInsertRowNodes.get(j);
- InsertRowNode actual =
- (InsertRowNode) walFlushListeners.get(j).getWalEntryHandler().getInsertNode();
- assertEquals(expect, actual);
- }
-
- walFlushListeners.get(0).getWalEntryHandler().unpinMemTable();
- return null;
- };
- Future<Void> future = executorService.submit(writeTask);
- futures.add(future);
- }
- // wait until all write tasks are done
- for (Future<Void> future : futures) {
- future.get();
- }
- executorService.shutdown();
- }
-
- @Test
- public void testConcurrentGetValue2() throws Exception {
- int threadsNum = 10;
- ExecutorService executorService = Executors.newFixedThreadPool(threadsNum);
- List<Future<Void>> futures = new ArrayList<>();
- for (int i = 0; i < threadsNum; ++i) {
- WALNode walNode = i % 2 == 0 ? walNode1 : walNode2;
- String logDirectory = i % 2 == 0 ? logDirectory1 : logDirectory2;
- Callable<Void> writeTask =
- () -> {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile");
-
- List<WALFlushListener> walFlushListeners = new ArrayList<>();
- List<InsertRowsNode> expectedInsertRowsNodes = new ArrayList<>();
- try {
- for (int j = 0; j < 1_000; ++j) {
- long memTableId = memTable.getMemTableId();
- InsertRowsNode node =
- getInsertRowsNode(devicePath + memTableId, System.currentTimeMillis());
- expectedInsertRowsNodes.add(node);
- WALFlushListener walFlushListener = walNode.log(memTableId, node);
- walFlushListeners.add(walFlushListener);
- }
- } catch (IllegalPathException e) {
- fail();
- }
-
- // wait until wal flushed
- Awaitility.await().until(walNode::isAllWALEntriesConsumed);
-
- walFlushListeners.get(0).getWalEntryHandler().pinMemTable();
- walNode.onMemTableFlushed(memTable);
-
- for (int j = 0; j < expectedInsertRowsNodes.size(); ++j) {
- InsertRowsNode expect = expectedInsertRowsNodes.get(j);
- InsertRowsNode actual =
- (InsertRowsNode) walFlushListeners.get(j).getWalEntryHandler().getInsertNode();
- assertEquals(expect, actual);
- }
-
- walFlushListeners.get(0).getWalEntryHandler().unpinMemTable();
- return null;
- };
- Future<Void> future = executorService.submit(writeTask);
- futures.add(future);
- }
- // wait until all write tasks are done
- for (Future<Void> future : futures) {
- future.get();
- }
- executorService.shutdown();
- }
-
- private InsertRowNode getInsertRowNode(String devicePath, long time) throws IllegalPathException {
- TSDataType[] dataTypes =
- new TSDataType[] {
- TSDataType.DOUBLE,
- TSDataType.FLOAT,
- TSDataType.INT64,
- TSDataType.INT32,
- TSDataType.BOOLEAN,
- TSDataType.TEXT
- };
-
- Object[] columns = new Object[6];
- columns[0] = 1.0d;
- columns[1] = 2f;
- columns[2] = 10000L;
- columns[3] = 100;
- columns[4] = false;
- columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET);
-
- InsertRowNode node =
- new InsertRowNode(
- new PlanNodeId(""),
- new PartialPath(devicePath),
- false,
- new String[] {"s1", "s2", "s3", "s4", "s5", "s6"},
- dataTypes,
- time,
- columns,
- false);
- MeasurementSchema[] schemas = new MeasurementSchema[6];
- for (int i = 0; i < 6; i++) {
- schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]);
- }
- node.setMeasurementSchemas(schemas);
- return node;
- }
-
- private InsertRowsNode getInsertRowsNode(String devicePath, long firstTime)
- throws IllegalPathException {
- TSDataType[] dataTypes =
- new TSDataType[] {
- TSDataType.DOUBLE,
- TSDataType.FLOAT,
- TSDataType.INT64,
- TSDataType.INT32,
- TSDataType.BOOLEAN,
- TSDataType.TEXT
- };
-
- Object[] columns = new Object[6];
- columns[0] = 1.0d;
- columns[1] = 2f;
- columns[2] = 10000L;
- columns[3] = 100;
- columns[4] = false;
- columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET);
-
- InsertRowNode node =
- new InsertRowNode(
- new PlanNodeId(""),
- new PartialPath(devicePath),
- false,
- new String[] {"s1", "s2", "s3", "s4", "s5", "s6"},
- dataTypes,
- firstTime,
- columns,
- false);
- MeasurementSchema[] schemas = new MeasurementSchema[6];
- for (int i = 0; i < 6; i++) {
- schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]);
- }
- node.setMeasurementSchemas(schemas);
-
- InsertRowsNode insertRowsNode = new InsertRowsNode(new PlanNodeId(""));
- insertRowsNode.addOneInsertRowNode(node, 0);
-
- node =
- new InsertRowNode(
- new PlanNodeId(""),
- new PartialPath(devicePath),
- false,
- new String[] {"s1", "s2", "s3", "s4", "s5", "s6"},
- dataTypes,
- firstTime + 10,
- columns,
- false);
- schemas = new MeasurementSchema[6];
- for (int i = 0; i < 6; i++) {
- schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]);
- }
- node.setMeasurementSchemas(schemas);
- insertRowsNode.addOneInsertRowNode(node, 1);
- return insertRowsNode;
- }
-}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java
index f72f55a..df88c34 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java
@@ -66,7 +66,6 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -149,14 +148,6 @@
}
}
assertEquals(expectedInsertTabletNodes, actualInsertTabletNodes);
- // check flush listeners
- try {
- for (WALFlushListener walFlushListener : walFlushListeners) {
- assertNotEquals(WALFlushListener.Status.FAILURE, walFlushListener.waitForResult());
- }
- } catch (NullPointerException e) {
- // ignore
- }
}
private void writeInsertTabletNode(
@@ -329,13 +320,5 @@
+ File.separator
+ WALFileUtils.getLogFileName(1, 0, WALFileStatus.CONTAINS_SEARCH_INDEX))
.exists());
- // check flush listeners
- try {
- for (WALFlushListener walFlushListener : walFlushListeners) {
- assertNotEquals(WALFlushListener.Status.FAILURE, walFlushListener.waitForResult());
- }
- } catch (NullPointerException e) {
- // ignore
- }
}
}
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java
index 593d25b..c132d26 100644
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java
+++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java
@@ -33,12 +33,8 @@
import org.apache.iotdb.db.storageengine.dataregion.DataRegionTest;
import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable;
import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileUtils;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache;
import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.db.utils.constant.TestConstant;
@@ -88,7 +84,6 @@
config.setDataRegionConsensusProtocolClass(prevConsensus);
EnvironmentUtils.cleanDir(logDirectory1);
StorageEngine.getInstance().reset();
- WALInsertNodeCache.getInstance().clear();
}
/**
@@ -283,66 +278,6 @@
}
/**
- * Ensure that wal pinned to memtable cannot be deleted: <br>
- * 1. _0-0-1.wal: memTable0 <br>
- * 2. pin memTable0 <br>
- * 3. memTable0 flush <br>
- * 4. roll wal file <br>
- * 5. _1-1-1.wal: memTable0、memTable1 <br>
- * 6. roll wal file <br>
- * 7. _2-1-1.wal: memTable1 <br>
- * 8. roll wal file <br>
- * 9. _2-1-1.wal: memTable1 <br>
- * 10. wait until all walEntry consumed <br>
- * 11. memTable0 flush, memTable1 flush <br>
- * 12. delete outdated wal files
- */
- @Test
- public void test05() throws IllegalPathException, MemTablePinException {
- IMemTable memTable0 = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable0, logDirectory1 + "/" + "fake.tsfile");
- WALFlushListener listener =
- walNode1.log(
- memTable0.getMemTableId(),
- generateInsertRowNode(devicePath, System.currentTimeMillis(), 1));
- walNode1.rollWALFile();
-
- // pin memTable
- WALEntryHandler handler = listener.getWalEntryHandler();
- handler.pinMemTable();
- walNode1.log(
- memTable0.getMemTableId(),
- generateInsertRowNode(devicePath, System.currentTimeMillis(), 2));
- IMemTable memTable1 = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode1.onMemTableCreated(memTable1, logDirectory1 + "/" + "fake.tsfile");
- walNode1.log(
- memTable1.getMemTableId(),
- generateInsertRowNode(devicePath, System.currentTimeMillis(), 3));
- walNode1.rollWALFile();
-
- walNode1.log(
- memTable1.getMemTableId(),
- generateInsertRowNode(devicePath, System.currentTimeMillis(), 4));
- walNode1.rollWALFile();
-
- walNode1.log(
- memTable1.getMemTableId(),
- generateInsertRowNode(devicePath, System.currentTimeMillis(), 5));
- walNode1.onMemTableFlushed(memTable0);
- walNode1.onMemTableFlushed(memTable1);
- Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed());
-
- Map<Long, Set<Long>> memTableIdsOfWal = walNode1.getWALBuffer().getMemTableIdsOfWal();
- Assert.assertEquals(4, memTableIdsOfWal.size());
- Assert.assertEquals(4, WALFileUtils.listAllWALFiles(new File(logDirectory1)).length);
-
- walNode1.deleteOutdatedFiles();
- Map<Long, Set<Long>> memTableIdsOfWalAfter = walNode1.getWALBuffer().getMemTableIdsOfWal();
- Assert.assertEquals(3, memTableIdsOfWalAfter.size());
- Assert.assertEquals(3, WALFileUtils.listAllWALFiles(new File(logDirectory1)).length);
- }
-
- /**
* Ensure that the flushed wal related to memtable cannot be deleted: <br>
* 1. _0-0-1.wal: memTable0 <br>
* 2. roll wal file <br>
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java
deleted file mode 100644
index 552c833..0000000
--- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.storageengine.dataregion.wal.utils;
-
-import org.apache.iotdb.commons.exception.IllegalPathException;
-import org.apache.iotdb.commons.path.PartialPath;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId;
-import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode;
-import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable;
-import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode;
-import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.db.utils.constant.TestConstant;
-
-import org.apache.tsfile.common.conf.TSFileConfig;
-import org.apache.tsfile.enums.TSDataType;
-import org.apache.tsfile.utils.Binary;
-import org.apache.tsfile.write.schema.MeasurementSchema;
-import org.awaitility.Awaitility;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-public class WALInsertNodeCacheTest {
- private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private static final String identifier = String.valueOf(Integer.MAX_VALUE);
- private static final String logDirectory = TestConstant.BASE_OUTPUT_PATH.concat("wal-test");
- private static final String databasePath = "root.test_sg";
- private static final String devicePath = databasePath + ".test_d";
- private static final String dataRegionId = "1";
- private static final WALInsertNodeCache cache = WALInsertNodeCache.getInstance();
- private WALMode prevMode;
- private WALNode walNode;
-
- @Before
- public void setUp() throws Exception {
- EnvironmentUtils.cleanDir(logDirectory);
- cache.clear();
- prevMode = config.getWalMode();
- config.setWalMode(WALMode.SYNC);
- walNode = new WALNode(identifier, logDirectory);
- }
-
- @After
- public void tearDown() throws Exception {
- walNode.close();
- cache.clear();
- config.setWalMode(prevMode);
- EnvironmentUtils.cleanDir(logDirectory);
- }
-
- @Test
- public void testLoadAfterSyncBuffer() throws IllegalPathException {
- try {
- // Limit the wal buffer size to trigger sync Buffer when writing wal entry
- walNode.setBufferSize(24);
- // write memTable
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1);
- WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition();
- // wait until wal flushed
- walNode.rollWALFile();
- Awaitility.await().until(() -> walNode.isAllWALEntriesConsumed() && position.canRead());
- // load by cache
- System.out.println(position.getPosition());
- assertEquals(node1, cache.getInsertNode(position));
- } finally {
- walNode.setBufferSize(config.getWalBufferSize());
- }
- }
-
- @Test
- public void testGetInsertNodeInParallel() throws IllegalPathException {
- // write memTable
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1);
- WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition();
- // wait until wal flushed
- walNode.rollWALFile();
- Awaitility.await().until(() -> walNode.isAllWALEntriesConsumed() && position.canRead());
- // Test getInsertNode in parallel to detect buffer concurrent problem
- AtomicBoolean failure = new AtomicBoolean(false);
- List<Thread> threadList = new ArrayList<>(5);
- for (int i = 0; i < 5; ++i) {
- Thread getInsertNodeThread =
- new Thread(
- () -> {
- if (!node1.equals(cache.getInsertNode(position))) {
- failure.set(true);
- }
- });
- threadList.add(getInsertNodeThread);
- getInsertNodeThread.start();
- }
- Awaitility.await()
- .until(
- () -> {
- for (Thread thread : threadList) {
- if (thread.isAlive()) {
- return false;
- }
- }
- return true;
- });
- assertFalse(failure.get());
- }
-
- @Test
- public void testLoadUnsealedWALFile() throws Exception {
- IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId);
- walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile");
- InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis());
- node1.setSearchIndex(1);
- WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1);
- WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition();
- // wait until wal flushed
- Awaitility.await().until(() -> walNode.isAllWALEntriesConsumed() && position.canRead());
- // load by cache
- assertEquals(node1, cache.getInsertNode(position));
- }
-
- private InsertRowNode getInsertRowNode(long time) throws IllegalPathException {
- TSDataType[] dataTypes =
- new TSDataType[] {
- TSDataType.DOUBLE,
- TSDataType.FLOAT,
- TSDataType.INT64,
- TSDataType.INT32,
- TSDataType.BOOLEAN,
- TSDataType.TEXT
- };
-
- Object[] columns = new Object[6];
- columns[0] = 1.0d;
- columns[1] = 2f;
- columns[2] = 10000L;
- columns[3] = 100;
- columns[4] = false;
- columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET);
-
- InsertRowNode node =
- new InsertRowNode(
- new PlanNodeId(""),
- new PartialPath(WALInsertNodeCacheTest.devicePath),
- false,
- new String[] {"s1", "s2", "s3", "s4", "s5", "s6"},
- dataTypes,
- time,
- columns,
- false);
- MeasurementSchema[] schemas = new MeasurementSchema[6];
- for (int i = 0; i < 6; i++) {
- schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]);
- }
- node.setMeasurementSchemas(schemas);
- return node;
- }
-}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java
index 19aff15..2c0e29e 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java
@@ -69,6 +69,8 @@
PBTREE_RELEASE_MONITOR("PBTree-Release-Task-Monitor"),
PBTREE_FLUSH_MONITOR("PBTree-Flush-Monitor"),
PBTREE_WORKER_POOL("PBTree-Worker-Pool"),
+ SCHEMA_PARALLEL_POOL("Schema-Parallel-Pool"),
+
// -------------------------- ClientService --------------------------
CLIENT_RPC_SERVICE("ClientRPC-Service"),
CLIENT_RPC_PROCESSOR("ClientRPC-Processor"),
@@ -143,7 +145,7 @@
PIPE_RECEIVER_AIR_GAP_AGENT("Pipe-Receiver-Air-Gap-Agent"),
PIPE_AIR_GAP_RECEIVER("Pipe-Air-Gap-Receiver"),
PIPE_PROGRESS_INDEX_BACKGROUND_SERVICE("Pipe-Progress-Index-Background-Service"),
- LOAD_DATATYPE_CONVERT_POOL("Load-Datatype-Convert-Pool"),
+ PIPE_PARALLEL_EXECUTION_POOL("Pipe-Parallel-Execution-Pool"),
SUBSCRIPTION_EXECUTOR_POOL("Subscription-Executor-Pool"),
SUBSCRIPTION_RUNTIME_META_SYNCER("Subscription-Runtime-Meta-Syncer"),
WINDOW_EVALUATION_SERVICE("WindowEvaluationTaskPoolManager"),
@@ -242,7 +244,8 @@
PBTREE_RELEASE_MONITOR,
SCHEMA_FORCE_MLOG,
PBTREE_FLUSH_MONITOR,
- PBTREE_WORKER_POOL));
+ PBTREE_WORKER_POOL,
+ SCHEMA_PARALLEL_POOL));
private static final Set<ThreadName> clientServiceThreadNames =
new HashSet<>(Arrays.asList(CLIENT_RPC_SERVICE, CLIENT_RPC_PROCESSOR));
@@ -299,6 +302,7 @@
PIPE_RECEIVER_AIR_GAP_AGENT,
PIPE_AIR_GAP_RECEIVER,
PIPE_PROGRESS_INDEX_BACKGROUND_SERVICE,
+ PIPE_PARALLEL_EXECUTION_POOL,
SUBSCRIPTION_EXECUTOR_POOL,
SUBSCRIPTION_RUNTIME_META_SYNCER,
WINDOW_EVALUATION_SERVICE,
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
index 038581b..53980d2 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java
@@ -201,29 +201,52 @@
private String pipeProgressIndexPersistDirName = "progress";
- private String pipeHardlinkWALDirName = "wal";
-
- private boolean pipeHardLinkWALEnabled = false;
-
private boolean pipeFileReceiverFsyncEnabled = true;
private int pipeRealTimeQueuePollTsFileThreshold = 10;
- private int pipeRealTimeQueuePollHistoricalTsFileThreshold = 3;
+
+ // Sequentially poll the tsFile by default
+ private int pipeRealTimeQueuePollHistoricalTsFileThreshold = 1;
private int pipeRealTimeQueueMaxWaitingTsFileSize = 1;
/** The maximum number of threads that can be used to execute subtasks in PipeSubtaskExecutor. */
private int pipeSubtaskExecutorMaxThreadNum =
Math.max(5, Runtime.getRuntime().availableProcessors() / 2);
- private int pipeNonForwardingEventsProgressReportInterval = 100;
-
private int pipeDataStructureTabletRowSize = 2048;
private int pipeDataStructureTabletSizeInBytes = 2097152;
- private double pipeDataStructureTabletMemoryBlockAllocationRejectThreshold = 0.2;
- private double pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold = 0.2;
- private double pipeDataStructureWalMemoryProportion = 0.3;
- private double PipeDataStructureBatchMemoryProportion = 0.1;
- private double pipeTotalFloatingMemoryProportion = 0.2;
+ private double pipeDataStructureTabletMemoryBlockAllocationRejectThreshold = 0.3;
+ private double pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold = 0.3;
+ private double PipeDataStructureBatchMemoryProportion = 0.2;
+ private volatile double pipeTotalFloatingMemoryProportion = 0.5;
+
+ // Check if memory check is enabled for Pipe
+ private boolean isPipeEnableMemoryCheck = true;
+
+ // Memory for InsertNode queue: 15MB, used to temporarily store data awaiting processing
+ private long pipeInsertNodeQueueMemory = 15 * MB;
+
+ // Memory for TsFile to Tablet conversion: 17MB, used for further processing after converting
+ // TSFile format to Tablet format
+ // Note: Pipes that do not decompose pattern/time do not need this part of memory
+ private long pipeTsFileParserMemory = 17 * MB;
+
+ // Memory for Sink batch sending (InsertNode/TsFile, choose one)
+ // 1. InsertNode: 15MB, used for batch sending data to the downstream system
+ private long pipeSinkBatchMemoryInsertNode = 15 * MB;
+
+ // 2. TsFile: 15MB, used for storing data about to be written to TsFile, similar to memTable
+ private long pipeSinkBatchMemoryTsFile = 15 * MB;
+
+ // Memory needed for the ReadBuffer during the TsFile sending process: 15MB, buffer for the file
+ // sending process
+ private long pipeSendTsFileReadBuffer = 15 * MB;
+
+ // Reserved memory percentage to accommodate memory fluctuations during system operation
+ private double pipeReservedMemoryPercentage = 0.15;
+
+ // Minimum memory required for the receiver: 38MB
+ private long pipeMinimumReceiverMemory = 38 * MB;
private int pipeSubtaskExecutorBasicCheckPointIntervalByConsumedEventCount = 10_000;
private long pipeSubtaskExecutorBasicCheckPointIntervalByTimeDuration = 10 * 1000L;
@@ -232,9 +255,10 @@
private long pipeSubtaskExecutorCronHeartbeatEventIntervalSeconds = 20;
private long pipeSubtaskExecutorForcedRestartIntervalMs = Long.MAX_VALUE;
+ private long pipeMaxWaitFinishTime = 2 * 60 * 1000;
+
private int pipeExtractorAssignerDisruptorRingBufferSize = 65536;
private long pipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes = 50; // 50B
- private int pipeExtractorMatcherCacheSize = 1024;
private int pipeConnectorHandshakeTimeoutMs = 10 * 1000; // 10 seconds
private int pipeConnectorTransferTimeoutMs = 15 * 60 * 1000; // 15 minutes
@@ -261,7 +285,7 @@
(int) (RpcUtils.THRIFT_FRAME_MAX_SIZE * 0.8);
private boolean isSeperatedPipeHeartbeatEnabled = true;
- private int pipeHeartbeatIntervalSecondsForCollectingPipeMeta = 30;
+ private int pipeHeartbeatIntervalSecondsForCollectingPipeMeta = 3;
private long pipeMetaSyncerInitialSyncDelayMinutes = 3;
private long pipeMetaSyncerSyncIntervalMinutes = 3;
private long pipeMetaSyncerAutoRestartPipeCheckIntervalRound = 1;
@@ -278,24 +302,12 @@
private int pipeMaxAllowedHistoricalTsFilePerDataRegion = Integer.MAX_VALUE; // Deprecated
private int pipeMaxAllowedPendingTsFileEpochPerDataRegion = Integer.MAX_VALUE; // Deprecated
- private int pipeMaxAllowedPinnedMemTableCount = Integer.MAX_VALUE; // per data region
private long pipeMaxAllowedLinkedTsFileCount = Long.MAX_VALUE; // Deprecated
- private float pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage = 0.1F;
- private long pipeStuckRestartIntervalSeconds = 120;
- private long pipeStuckRestartMinIntervalMs = 5 * 60 * 1000L; // 5 minutes
- private boolean pipeEpochKeepTsFileAfterStuckRestartEnabled = false;
- private long pipeFlushAfterLastTerminateSeconds = 30;
- private long pipeFlushAfterTerminateCount = 30;
- private long pipeStorageEngineFlushTimeIntervalMs = Long.MAX_VALUE;
- private int pipeMaxAllowedRemainingInsertEventCountPerPipe = 10000;
- private int pipeMaxAllowedTotalRemainingInsertEventCount = 50000;
private int pipeMetaReportMaxLogNumPerRound = 10;
private int pipeMetaReportMaxLogIntervalRounds = 36;
private int pipeTsFilePinMaxLogNumPerRound = 10;
private int pipeTsFilePinMaxLogIntervalRounds = 90;
- private int pipeWalPinMaxLogNumPerRound = 10;
- private int pipeWalPinMaxLogIntervalRounds = 90;
private boolean pipeMemoryManagementEnabled = true;
private long pipeMemoryAllocateRetryIntervalMs = 50;
@@ -723,23 +735,6 @@
return timestampPrecisionCheckEnabled;
}
- public int getPipeNonForwardingEventsProgressReportInterval() {
- return pipeNonForwardingEventsProgressReportInterval;
- }
-
- public void setPipeNonForwardingEventsProgressReportInterval(
- int pipeNonForwardingEventsProgressReportInterval) {
- if (this.pipeNonForwardingEventsProgressReportInterval
- == pipeNonForwardingEventsProgressReportInterval) {
- return;
- }
- this.pipeNonForwardingEventsProgressReportInterval =
- pipeNonForwardingEventsProgressReportInterval;
- logger.info(
- "pipeNonForwardingEventsProgressReportInterval is set to {}.",
- pipeNonForwardingEventsProgressReportInterval);
- }
-
public String getPipeHardlinkBaseDirName() {
return pipeHardlinkBaseDirName;
}
@@ -776,30 +771,6 @@
logger.info("pipeProgressIndexPersistDir is set to {}.", pipeProgressIndexPersistDirName);
}
- public String getPipeHardlinkWALDirName() {
- return pipeHardlinkWALDirName;
- }
-
- public void setPipeHardlinkWALDirName(String pipeWALDirName) {
- if (Objects.equals(pipeWALDirName, this.pipeHardlinkWALDirName)) {
- return;
- }
- this.pipeHardlinkWALDirName = pipeWALDirName;
- logger.info("pipeHardlinkWALDirName is set to {}.", pipeWALDirName);
- }
-
- public boolean getPipeHardLinkWALEnabled() {
- return pipeHardLinkWALEnabled;
- }
-
- public void setPipeHardLinkWALEnabled(boolean pipeHardLinkWALEnabled) {
- if (this.pipeHardLinkWALEnabled == pipeHardLinkWALEnabled) {
- return;
- }
- this.pipeHardLinkWALEnabled = pipeHardLinkWALEnabled;
- logger.info("pipeHardLinkWALEnabled is set to {}.", pipeHardLinkWALEnabled);
- }
-
public boolean getPipeFileReceiverFsyncEnabled() {
return pipeFileReceiverFsyncEnabled;
}
@@ -871,19 +842,6 @@
pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold);
}
- public double getPipeDataStructureWalMemoryProportion() {
- return pipeDataStructureWalMemoryProportion;
- }
-
- public void setPipeDataStructureWalMemoryProportion(double pipeDataStructureWalMemoryProportion) {
- if (this.pipeDataStructureWalMemoryProportion == pipeDataStructureWalMemoryProportion) {
- return;
- }
- this.pipeDataStructureWalMemoryProportion = pipeDataStructureWalMemoryProportion;
- logger.info(
- "pipeDataStructureWalMemoryProportion is set to {}.", pipeDataStructureWalMemoryProportion);
- }
-
public double getPipeDataStructureBatchMemoryProportion() {
return PipeDataStructureBatchMemoryProportion;
}
@@ -899,6 +857,102 @@
PipeDataStructureBatchMemoryProportion);
}
+ public boolean isPipeEnableMemoryChecked() {
+ return isPipeEnableMemoryCheck;
+ }
+
+ public void setIsPipeEnableMemoryChecked(boolean isPipeEnableMemoryChecked) {
+ if (this.isPipeEnableMemoryCheck == isPipeEnableMemoryChecked) {
+ return;
+ }
+ this.isPipeEnableMemoryCheck = isPipeEnableMemoryChecked;
+ logger.info("isPipeEnableMemoryChecked is set to {}.", isPipeEnableMemoryChecked);
+ }
+
+ public long getPipeInsertNodeQueueMemory() {
+ return pipeInsertNodeQueueMemory;
+ }
+
+ public void setPipeInsertNodeQueueMemory(long pipeInsertNodeQueueMemory) {
+ if (this.pipeInsertNodeQueueMemory == pipeInsertNodeQueueMemory) {
+ return;
+ }
+ this.pipeInsertNodeQueueMemory = pipeInsertNodeQueueMemory;
+ logger.info("pipeInsertNodeQueueMemory is set to {}.", pipeInsertNodeQueueMemory);
+ }
+
+ public long getPipeTsFileParserMemory() {
+ return pipeTsFileParserMemory;
+ }
+
+ public void setPipeTsFileParserMemory(long pipeTsFileParserMemory) {
+ if (this.pipeTsFileParserMemory == pipeTsFileParserMemory) {
+ return;
+ }
+ this.pipeTsFileParserMemory = pipeTsFileParserMemory;
+ logger.info("pipeTsFileParserMemory is set to {}.", pipeTsFileParserMemory);
+ }
+
+ public long getPipeSinkBatchMemoryInsertNode() {
+ return pipeSinkBatchMemoryInsertNode;
+ }
+
+ public void setPipeSinkBatchMemoryInsertNode(long pipeSinkBatchMemoryInsertNode) {
+ if (this.pipeSinkBatchMemoryInsertNode == pipeSinkBatchMemoryInsertNode) {
+ return;
+ }
+ this.pipeSinkBatchMemoryInsertNode = pipeSinkBatchMemoryInsertNode;
+ logger.info("pipeSinkBatchMemoryInsertNode is set to {}.", pipeSinkBatchMemoryInsertNode);
+ }
+
+ public long getPipeSinkBatchMemoryTsFile() {
+ return pipeSinkBatchMemoryTsFile;
+ }
+
+ public void setPipeSinkBatchMemoryTsFile(long pipeSinkBatchMemoryTsFile) {
+ if (this.pipeSinkBatchMemoryTsFile == pipeSinkBatchMemoryTsFile) {
+ return;
+ }
+ this.pipeSinkBatchMemoryTsFile = pipeSinkBatchMemoryTsFile;
+ logger.info("pipeSinkBatchMemoryTsFile is set to {}.", pipeSinkBatchMemoryTsFile);
+ }
+
+ public long getPipeSendTsFileReadBuffer() {
+ return pipeSendTsFileReadBuffer;
+ }
+
+ public void setPipeSendTsFileReadBuffer(long pipeSendTsFileReadBuffer) {
+ if (this.pipeSendTsFileReadBuffer == pipeSendTsFileReadBuffer) {
+ return;
+ }
+ this.pipeSendTsFileReadBuffer = pipeSendTsFileReadBuffer;
+ logger.info("pipeSendTsFileReadBuffer is set to {}.", pipeSendTsFileReadBuffer);
+ }
+
+ public double getPipeReservedMemoryPercentage() {
+ return pipeReservedMemoryPercentage;
+ }
+
+ public void setPipeReservedMemoryPercentage(double pipeReservedMemoryPercentage) {
+ if (this.pipeReservedMemoryPercentage == pipeReservedMemoryPercentage) {
+ return;
+ }
+ this.pipeReservedMemoryPercentage = pipeReservedMemoryPercentage;
+ logger.info("pipeReservedMemoryPercentage is set to {}.", pipeReservedMemoryPercentage);
+ }
+
+ public long getPipeMinimumReceiverMemory() {
+ return pipeMinimumReceiverMemory;
+ }
+
+ public void setPipeMinimumReceiverMemory(long pipeMinimumReceiverMemory) {
+ if (this.pipeMinimumReceiverMemory == pipeMinimumReceiverMemory) {
+ return;
+ }
+ this.pipeMinimumReceiverMemory = pipeMinimumReceiverMemory;
+ logger.info("pipeMinimumReceiverMemory is set to {}.", pipeMinimumReceiverMemory);
+ }
+
public double getPipeTotalFloatingMemoryProportion() {
return pipeTotalFloatingMemoryProportion;
}
@@ -946,18 +1000,6 @@
pipeExtractorAssignerDisruptorRingBufferEntrySize);
}
- public int getPipeExtractorMatcherCacheSize() {
- return pipeExtractorMatcherCacheSize;
- }
-
- public void setPipeExtractorMatcherCacheSize(int pipeExtractorMatcherCacheSize) {
- if (this.pipeExtractorMatcherCacheSize == pipeExtractorMatcherCacheSize) {
- return;
- }
- this.pipeExtractorMatcherCacheSize = pipeExtractorMatcherCacheSize;
- logger.info("pipeExtractorMatcherCacheSize is set to {}.", pipeExtractorMatcherCacheSize);
- }
-
public int getPipeConnectorHandshakeTimeoutMs() {
return pipeConnectorHandshakeTimeoutMs;
}
@@ -1380,6 +1422,18 @@
pipeSubtaskExecutorForcedRestartIntervalMs);
}
+ public long getPipeMaxWaitFinishTime() {
+ return pipeMaxWaitFinishTime;
+ }
+
+ public void setPipeMaxWaitFinishTime(long pipeMaxWaitFinishTime) {
+ if (this.pipeMaxWaitFinishTime == pipeMaxWaitFinishTime) {
+ return;
+ }
+ this.pipeMaxWaitFinishTime = pipeMaxWaitFinishTime;
+ logger.info("pipeMaxWaitFinishTime is set to {}.", pipeMaxWaitFinishTime);
+ }
+
public int getPipeRealTimeQueuePollTsFileThreshold() {
return pipeRealTimeQueuePollTsFileThreshold;
}
@@ -1512,19 +1566,6 @@
pipeMaxAllowedPendingTsFileEpochPerDataRegion);
}
- public int getPipeMaxAllowedPinnedMemTableCount() {
- return pipeMaxAllowedPinnedMemTableCount;
- }
-
- public void setPipeMaxAllowedPinnedMemTableCount(int pipeMaxAllowedPinnedMemTableCount) {
- if (this.pipeMaxAllowedPinnedMemTableCount == pipeMaxAllowedPinnedMemTableCount) {
- return;
- }
- this.pipeMaxAllowedPinnedMemTableCount = pipeMaxAllowedPinnedMemTableCount;
- logger.info(
- "pipeMaxAllowedPinnedMemTableCount is set to {}", pipeMaxAllowedPinnedMemTableCount);
- }
-
public long getPipeMaxAllowedLinkedTsFileCount() {
return pipeMaxAllowedLinkedTsFileCount;
}
@@ -1537,135 +1578,6 @@
logger.info("pipeMaxAllowedLinkedTsFileCount is set to {}", pipeMaxAllowedLinkedTsFileCount);
}
- public float getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() {
- return pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage;
- }
-
- public void setPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage(
- float pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage) {
- if (this.pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage
- == pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage) {
- return;
- }
- this.pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage =
- pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage;
- logger.info(
- "pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage is set to {}",
- pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage);
- }
-
- public long getPipeStuckRestartIntervalSeconds() {
- return pipeStuckRestartIntervalSeconds;
- }
-
- public long getPipeStuckRestartMinIntervalMs() {
- return pipeStuckRestartMinIntervalMs;
- }
-
- public boolean isPipeEpochKeepTsFileAfterStuckRestartEnabled() {
- return pipeEpochKeepTsFileAfterStuckRestartEnabled;
- }
-
- public long getPipeStorageEngineFlushTimeIntervalMs() {
- return pipeStorageEngineFlushTimeIntervalMs;
- }
-
- public int getPipeMaxAllowedRemainingInsertEventCountPerPipe() {
- return pipeMaxAllowedRemainingInsertEventCountPerPipe;
- }
-
- public void setPipeMaxAllowedRemainingInsertEventCountPerPipe(
- int pipeMaxAllowedRemainingInsertEventCountPerPipe) {
- if (this.pipeMaxAllowedRemainingInsertEventCountPerPipe
- == pipeMaxAllowedRemainingInsertEventCountPerPipe) {
- return;
- }
- this.pipeMaxAllowedRemainingInsertEventCountPerPipe =
- pipeMaxAllowedRemainingInsertEventCountPerPipe;
- logger.info(
- "pipeMaxAllowedRemainingInsertEventCount is set to {}",
- pipeMaxAllowedRemainingInsertEventCountPerPipe);
- }
-
- public int getPipeMaxAllowedTotalRemainingInsertEventCount() {
- return pipeMaxAllowedTotalRemainingInsertEventCount;
- }
-
- public void setPipeMaxAllowedTotalRemainingInsertEventCount(
- int pipeMaxAllowedTotalRemainingInsertEventCount) {
- if (this.pipeMaxAllowedTotalRemainingInsertEventCount
- == pipeMaxAllowedTotalRemainingInsertEventCount) {
- return;
- }
- this.pipeMaxAllowedTotalRemainingInsertEventCount =
- pipeMaxAllowedTotalRemainingInsertEventCount;
- logger.info(
- "pipeMaxAllowedTotalRemainingInsertEventCount is set to {}",
- pipeMaxAllowedTotalRemainingInsertEventCount);
- }
-
- public void setPipeStuckRestartIntervalSeconds(long pipeStuckRestartIntervalSeconds) {
- if (this.pipeStuckRestartIntervalSeconds == pipeStuckRestartIntervalSeconds) {
- return;
- }
- this.pipeStuckRestartIntervalSeconds = pipeStuckRestartIntervalSeconds;
- logger.info("pipeStuckRestartIntervalSeconds is set to {}", pipeStuckRestartIntervalSeconds);
- }
-
- public void setPipeStuckRestartMinIntervalMs(long pipeStuckRestartMinIntervalMs) {
- if (this.pipeStuckRestartMinIntervalMs == pipeStuckRestartMinIntervalMs) {
- return;
- }
- this.pipeStuckRestartMinIntervalMs = pipeStuckRestartMinIntervalMs;
- logger.info("pipeStuckRestartMinIntervalMs is set to {}", pipeStuckRestartMinIntervalMs);
- }
-
- public void setPipeEpochKeepTsFileAfterStuckRestartEnabled(
- boolean pipeEpochKeepTsFileAfterStuckRestartEnabled) {
- if (this.pipeEpochKeepTsFileAfterStuckRestartEnabled
- == pipeEpochKeepTsFileAfterStuckRestartEnabled) {
- return;
- }
- this.pipeEpochKeepTsFileAfterStuckRestartEnabled = pipeEpochKeepTsFileAfterStuckRestartEnabled;
- logger.info(
- "pipeEpochKeepTsFileAfterStuckRestartEnabled is set to {}",
- pipeEpochKeepTsFileAfterStuckRestartEnabled);
- }
-
- public void setPipeStorageEngineFlushTimeIntervalMs(long pipeStorageEngineFlushTimeIntervalMs) {
- if (this.pipeStorageEngineFlushTimeIntervalMs == pipeStorageEngineFlushTimeIntervalMs) {
- return;
- }
- this.pipeStorageEngineFlushTimeIntervalMs = pipeStorageEngineFlushTimeIntervalMs;
- logger.info(
- "pipeStorageEngineFlushTimeIntervalMs is set to {}", pipeStorageEngineFlushTimeIntervalMs);
- }
-
- public long getPipeFlushAfterLastTerminateSeconds() {
- return pipeFlushAfterLastTerminateSeconds;
- }
-
- public void setPipeFlushAfterLastTerminateSeconds(long pipeFlushAfterLastTerminateSeconds) {
- if (this.pipeFlushAfterLastTerminateSeconds == pipeFlushAfterLastTerminateSeconds) {
- return;
- }
- this.pipeFlushAfterLastTerminateSeconds = pipeFlushAfterLastTerminateSeconds;
- logger.info(
- "pipeFlushAfterLastTerminateSeconds is set to {}", pipeFlushAfterLastTerminateSeconds);
- }
-
- public long getPipeFlushAfterTerminateCount() {
- return pipeFlushAfterTerminateCount;
- }
-
- public void setPipeFlushAfterTerminateCount(long pipeFlushAfterTerminateCount) {
- if (this.pipeFlushAfterTerminateCount == pipeFlushAfterTerminateCount) {
- return;
- }
- this.pipeFlushAfterTerminateCount = pipeFlushAfterTerminateCount;
- logger.info("pipeFlushAfterTerminateCount is set to {}", pipeFlushAfterTerminateCount);
- }
-
public int getPipeMetaReportMaxLogNumPerRound() {
return pipeMetaReportMaxLogNumPerRound;
}
@@ -1716,30 +1628,6 @@
"pipeTsFilePinMaxLogIntervalRounds is set to {}", pipeTsFilePinMaxLogIntervalRounds);
}
- public int getPipeWalPinMaxLogNumPerRound() {
- return pipeWalPinMaxLogNumPerRound;
- }
-
- public void setPipeWalPinMaxLogNumPerRound(int pipeWalPinMaxLogNumPerRound) {
- if (this.pipeWalPinMaxLogNumPerRound == pipeWalPinMaxLogNumPerRound) {
- return;
- }
- this.pipeWalPinMaxLogNumPerRound = pipeWalPinMaxLogNumPerRound;
- logger.info("pipeWalPinMaxLogNumPerRound is set to {}", pipeWalPinMaxLogNumPerRound);
- }
-
- public int getPipeWalPinMaxLogIntervalRounds() {
- return pipeWalPinMaxLogIntervalRounds;
- }
-
- public void setPipeWalPinMaxLogIntervalRounds(int pipeWalPinMaxLogIntervalRounds) {
- if (this.pipeWalPinMaxLogIntervalRounds == pipeWalPinMaxLogIntervalRounds) {
- return;
- }
- this.pipeWalPinMaxLogIntervalRounds = pipeWalPinMaxLogIntervalRounds;
- logger.info("pipeWalPinMaxLogIntervalRounds is set to {}", pipeWalPinMaxLogIntervalRounds);
- }
-
public boolean getPipeMemoryManagementEnabled() {
return pipeMemoryManagementEnabled;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java
index b54d6db..3c8d13b 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java
@@ -215,7 +215,7 @@
* <p>Notice:TotalOrderSumTuple is an ordered tuple, the larger the subscript the higher the
* weight of the element when comparing sizes, e.g. (1, 2) is larger than (2, 1).
*/
- protected static class TotalOrderSumTuple implements Comparable<TotalOrderSumTuple> {
+ public static class TotalOrderSumTuple implements Comparable<TotalOrderSumTuple> {
private final ImmutableList<Long> tuple;
/**
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndexType.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndexType.java
index 58548e1..d4b0f4f 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndexType.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndexType.java
@@ -24,6 +24,7 @@
import org.apache.iotdb.commons.consensus.index.impl.MetaProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.RecoverProgressIndex;
+import org.apache.iotdb.commons.consensus.index.impl.SegmentProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.SimpleProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.StateProgressIndex;
import org.apache.iotdb.commons.consensus.index.impl.TimeWindowStateProgressIndex;
@@ -43,7 +44,8 @@
HYBRID_PROGRESS_INDEX((short) 5),
META_PROGRESS_INDEX((short) 6),
TIME_WINDOW_STATE_PROGRESS_INDEX((short) 7),
- STATE_PROGRESS_INDEX((short) 8);
+ STATE_PROGRESS_INDEX((short) 8),
+ SEGMENT_PROGRESS_INDEX((short) 9);
private final short type;
@@ -82,6 +84,8 @@
return TimeWindowStateProgressIndex.deserializeFrom(byteBuffer);
case 8:
return StateProgressIndex.deserializeFrom(byteBuffer);
+ case 9:
+ return SegmentProgressIndex.deserializeFrom(byteBuffer);
default:
throw new UnsupportedOperationException(
String.format("Unsupported progress index type %s.", indexType));
@@ -107,6 +111,8 @@
return TimeWindowStateProgressIndex.deserializeFrom(stream);
case 8:
return StateProgressIndex.deserializeFrom(stream);
+ case 9:
+ return SegmentProgressIndex.deserializeFrom(stream);
default:
throw new UnsupportedOperationException(
String.format("Unsupported progress index type %s.", indexType));
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java
index 8b02d85..8f6a248 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java
@@ -109,14 +109,18 @@
return false;
}
- final IoTProgressIndex thisIoTProgressIndex = this;
final IoTProgressIndex thatIoTProgressIndex = (IoTProgressIndex) progressIndex;
- return thatIoTProgressIndex.peerId2SearchIndex.entrySet().stream()
- .noneMatch(
- entry ->
- !thisIoTProgressIndex.peerId2SearchIndex.containsKey(entry.getKey())
- || thisIoTProgressIndex.peerId2SearchIndex.get(entry.getKey())
- <= entry.getValue());
+ boolean isEquals = true;
+ for (final Map.Entry<Integer, Long> entry :
+ thatIoTProgressIndex.peerId2SearchIndex.entrySet()) {
+ if (!peerId2SearchIndex.containsKey(entry.getKey())
+ || peerId2SearchIndex.get(entry.getKey()) < entry.getValue()) {
+ return false;
+ } else if (peerId2SearchIndex.get(entry.getKey()) > entry.getValue()) {
+ isEquals = false;
+ }
+ }
+ return !isEquals;
} finally {
lock.readLock().unlock();
}
@@ -204,15 +208,6 @@
}
}
- public int getPeerId2SearchIndexSize() {
- lock.readLock().lock();
- try {
- return peerId2SearchIndex.size();
- } finally {
- lock.readLock().unlock();
- }
- }
-
public static IoTProgressIndex deserializeFrom(ByteBuffer byteBuffer) {
final IoTProgressIndex ioTProgressIndex = new IoTProgressIndex();
final int size = ReadWriteIOUtils.readInt(byteBuffer);
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/SegmentProgressIndex.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/SegmentProgressIndex.java
new file mode 100644
index 0000000..5309e54
--- /dev/null
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/SegmentProgressIndex.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.commons.consensus.index.impl;
+
+import org.apache.iotdb.commons.consensus.index.ProgressIndex;
+import org.apache.iotdb.commons.consensus.index.ProgressIndexType;
+
+import org.apache.tsfile.utils.Pair;
+import org.apache.tsfile.utils.RamUsageEstimator;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import javax.annotation.Nonnull;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * {@link SegmentProgressIndex} is a usual {@link ProgressIndex} with broken segments allowed. An
+ * {@link org.apache.iotdb.pipe.api.event.Event} is sent if its {@link ProgressIndex} is &lt;= the
+ * {@link #latestProgressIndex} and none of the {@link #brokenProgressIndexes} has its {@link Pair#left}
+ * &lt;= its {@link ProgressIndex} &lt; {@link Pair#right}. If the {@link #brokenProgressIndexes}
+ * {@link List#isEmpty()}, the {@link ProgressIndex} behaves just like the {@link #latestProgressIndex}. It
+ * is only used in the realtime data region extractor's {@link
+ * org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta} to handle downgrading, and will never
+ * be in the insertNodes or tsFiles.
+ */
+public class SegmentProgressIndex extends ProgressIndex {
+ private static final long INSTANCE_SIZE =
+ RamUsageEstimator.shallowSizeOfInstance(SegmentProgressIndex.class);
+ public static final long LIST_SIZE = RamUsageEstimator.shallowSizeOfInstance(ArrayList.class);
+ public static final long PAIR_SIZE = RamUsageEstimator.shallowSizeOfInstance(Pair.class);
+ private ProgressIndex latestProgressIndex = MinimumProgressIndex.INSTANCE;
+
+ // <startIndex, endIndex> of the downgraded segments; endIndex is null until recordEnd() — NOTE(review): confirm serialize()/ramBytesUsed()/isAfter() are never reached with an open segment, since getRight() would be null there
+ private final LinkedList<Pair<ProgressIndex, ProgressIndex>> brokenProgressIndexes =
+ new LinkedList<>();
+
+ public void recordStart(final ProgressIndex index) {
+ brokenProgressIndexes.add(new Pair<>(index, null));
+ }
+
+ public void recordEnd(final ProgressIndex index) {
+ brokenProgressIndexes.getLast().setRight(index);
+ }
+
+ public void eliminate(final ProgressIndex index) {
+ final Iterator<Pair<ProgressIndex, ProgressIndex>> iterator = brokenProgressIndexes.iterator();
+ while (iterator.hasNext()) {
+ if (index.equals(iterator.next().getRight())) {
+ iterator.remove();
+ return;
+ }
+ }
+ }
+
+ @Override
+ public void serialize(final ByteBuffer byteBuffer) {
+ ProgressIndexType.SEGMENT_PROGRESS_INDEX.serialize(byteBuffer);
+
+ latestProgressIndex.serialize(byteBuffer);
+ ReadWriteIOUtils.write(brokenProgressIndexes.size(), byteBuffer);
+ for (final Pair<ProgressIndex, ProgressIndex> index : brokenProgressIndexes) {
+ index.getLeft().serialize(byteBuffer);
+ index.getRight().serialize(byteBuffer);
+ }
+ }
+
+ @Override
+ public void serialize(final OutputStream stream) throws IOException {
+ ProgressIndexType.SEGMENT_PROGRESS_INDEX.serialize(stream);
+
+ latestProgressIndex.serialize(stream);
+ ReadWriteIOUtils.write(brokenProgressIndexes.size(), stream);
+ for (final Pair<ProgressIndex, ProgressIndex> index : brokenProgressIndexes) {
+ index.getLeft().serialize(stream);
+ index.getRight().serialize(stream);
+ }
+ }
+
+ @Override
+ public boolean isAfter(final @Nonnull ProgressIndex progressIndex) {
+ return latestProgressIndex.isAfter(progressIndex)
+ && brokenProgressIndexes.stream()
+ .noneMatch(
+ pair ->
+ pair.getRight().isAfter(progressIndex)
+ && (progressIndex.isAfter(pair.getLeft())
+ || progressIndex.equals(pair.getLeft())));
+ }
+
+ @Override
+ public boolean equals(final ProgressIndex progressIndex) {
+ if (progressIndex == null) {
+ return false;
+ }
+ if (this == progressIndex) {
+ return true;
+ }
+ if (progressIndex instanceof SegmentProgressIndex) {
+ final SegmentProgressIndex that = (SegmentProgressIndex) progressIndex;
+ return this.latestProgressIndex.equals(that.latestProgressIndex)
+ && this.brokenProgressIndexes.equals(that.brokenProgressIndexes);
+ }
+ return this.latestProgressIndex.equals(progressIndex);
+ }
+
+ @Override
+ public ProgressIndex updateToMinimumEqualOrIsAfterProgressIndex(
+ final ProgressIndex progressIndex) {
+ return latestProgressIndex.updateToMinimumEqualOrIsAfterProgressIndex(progressIndex);
+ }
+
+ @Override
+ public ProgressIndexType getType() {
+ return ProgressIndexType.SEGMENT_PROGRESS_INDEX;
+ }
+
+ @Override
+ public TotalOrderSumTuple getTotalOrderSumTuple() {
+ throw new UnsupportedOperationException(
+ "This progressIndex is not for tsFile and shall never be used to sort resources");
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return INSTANCE_SIZE
+ + latestProgressIndex.ramBytesUsed()
+ + shallowSizeOfList(brokenProgressIndexes)
+ + PAIR_SIZE * brokenProgressIndexes.size()
+ + brokenProgressIndexes.stream()
+ .mapToLong(index -> index.getLeft().ramBytesUsed() + index.getRight().ramBytesUsed())
+ .reduce(0L, Long::sum);
+ }
+
+ public static SegmentProgressIndex deserializeFrom(final ByteBuffer byteBuffer) {
+ final SegmentProgressIndex segmentProgressIndex = new SegmentProgressIndex();
+ segmentProgressIndex.latestProgressIndex = ProgressIndexType.deserializeFrom(byteBuffer);
+ final int size = ReadWriteIOUtils.readInt(byteBuffer);
+ for (int i = 0; i < size; i++) {
+ segmentProgressIndex.brokenProgressIndexes.add(
+ new Pair<>(
+ ProgressIndexType.deserializeFrom(byteBuffer),
+ ProgressIndexType.deserializeFrom(byteBuffer)));
+ }
+ return segmentProgressIndex;
+ }
+
+ public static SegmentProgressIndex deserializeFrom(final InputStream stream) throws IOException {
+ final SegmentProgressIndex segmentProgressIndex = new SegmentProgressIndex();
+ segmentProgressIndex.latestProgressIndex = ProgressIndexType.deserializeFrom(stream);
+ final int size = ReadWriteIOUtils.readInt(stream);
+ for (int i = 0; i < size; i++) {
+ segmentProgressIndex.brokenProgressIndexes.add(
+ new Pair<>(
+ ProgressIndexType.deserializeFrom(stream),
+ ProgressIndexType.deserializeFrom(stream)));
+ }
+ return segmentProgressIndex;
+ }
+
+ private long shallowSizeOfList(final List<?> list) {
+ return Objects.nonNull(list)
+ ? SegmentProgressIndex.LIST_SIZE
+ + RamUsageEstimator.alignObjectSize(
+ RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
+ + (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF * list.size())
+ : 0L;
+ }
+}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java
index 353da8c..9942f64 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java
@@ -19,6 +19,7 @@
package org.apache.iotdb.commons.pipe.agent.task;
+import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.exception.IllegalPathException;
import org.apache.iotdb.commons.exception.pipe.PipeRuntimeConnectorCriticalException;
import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException;
@@ -44,6 +45,7 @@
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
@@ -55,6 +57,7 @@
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
import java.util.stream.Collectors;
/**
@@ -343,7 +346,10 @@
public List<TPushPipeMetaRespExceptionMessage> handlePipeMetaChanges(
final List<PipeMeta> pipeMetaListFromCoordinator) {
- acquireWriteLock();
+ if (!tryWriteLockWithTimeOut(
+ CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) {
+ return null;
+ }
try {
return handlePipeMetaChangesInternal(pipeMetaListFromCoordinator);
} finally {
@@ -463,6 +469,11 @@
final String pipeName = pipeMetaFromCoordinator.getStaticMeta().getPipeName();
final long creationTime = pipeMetaFromCoordinator.getStaticMeta().getCreationTime();
+ calculateMemoryUsage(
+ pipeMetaFromCoordinator.getStaticMeta().getExtractorParameters(),
+ pipeMetaFromCoordinator.getStaticMeta().getProcessorParameters(),
+ pipeMetaFromCoordinator.getStaticMeta().getConnectorParameters());
+
final PipeMeta existedPipeMeta = pipeMetaKeeper.getPipeMeta(pipeName);
if (existedPipeMeta != null) {
if (!checkBeforeCreatePipe(existedPipeMeta, pipeName, creationTime)) {
@@ -481,7 +492,7 @@
// Trigger create() method for each pipe task by parallel stream
final long startTime = System.currentTimeMillis();
- pipeTasks.values().parallelStream().forEach(PipeTask::create);
+ runPipeTasks(pipeTasks.values(), PipeTask::create);
LOGGER.info(
"Create all pipe tasks on Pipe {} successfully within {} ms",
pipeName,
@@ -503,6 +514,13 @@
return needToStartPipe;
}
+ protected void calculateMemoryUsage(
+ final PipeParameters extractorParameters,
+ final PipeParameters processorParameters,
+ final PipeParameters connectorParameters) {
+ // do nothing
+ }
+
protected abstract Map<Integer, PipeTask> buildPipeTasks(final PipeMeta pipeMetaFromCoordinator)
throws IllegalPathException;
@@ -536,7 +554,7 @@
// Trigger drop() method for each pipe task by parallel stream
final long startTime = System.currentTimeMillis();
- pipeTasks.values().parallelStream().forEach(PipeTask::drop);
+ runPipeTasks(pipeTasks.values(), PipeTask::drop);
LOGGER.info(
"Drop all pipe tasks on Pipe {} successfully within {} ms",
pipeName,
@@ -575,7 +593,7 @@
// Trigger drop() method for each pipe task by parallel stream
final long startTime = System.currentTimeMillis();
- pipeTasks.values().parallelStream().forEach(PipeTask::drop);
+ runPipeTasks(pipeTasks.values(), PipeTask::drop);
LOGGER.info(
"Drop all pipe tasks on Pipe {} successfully within {} ms",
pipeName,
@@ -608,7 +626,7 @@
// Trigger start() method for each pipe task by parallel stream
final long startTime = System.currentTimeMillis();
- pipeTasks.values().parallelStream().forEach(PipeTask::start);
+ runPipeTasks(pipeTasks.values(), PipeTask::start);
LOGGER.info(
"Start all pipe tasks on Pipe {} successfully within {} ms",
pipeName,
@@ -647,7 +665,7 @@
// Trigger stop() method for each pipe task by parallel stream
final long startTime = System.currentTimeMillis();
- pipeTasks.values().parallelStream().forEach(PipeTask::stop);
+ runPipeTasks(pipeTasks.values(), PipeTask::stop);
LOGGER.info(
"Stop all pipe tasks on Pipe {} successfully within {} ms",
pipeName,
@@ -1047,7 +1065,10 @@
public void collectPipeMetaList(final TPipeHeartbeatReq req, final TPipeHeartbeatResp resp)
throws TException {
- acquireReadLock();
+ if (!tryReadLockWithTimeOut(
+ CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) {
+ return;
+ }
try {
collectPipeMetaListInternal(req, resp);
} finally {
@@ -1058,6 +1079,9 @@
protected abstract void collectPipeMetaListInternal(
final TPipeHeartbeatReq req, final TPipeHeartbeatResp resp) throws TException;
+ public abstract void runPipeTasks(
+ final Collection<PipeTask> pipeTasks, final Consumer<PipeTask> runSingle);
+
///////////////////////// Maintain meta info /////////////////////////
public long getPipeCreationTime(final String pipeName) {
@@ -1100,17 +1124,21 @@
: ((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta()).getFloatingMemoryUsageInByte();
}
- public void addFloatingMemoryUsageInByte(final String pipeName, final long sizeInByte) {
+ public void addFloatingMemoryUsageInByte(
+ final String pipeName, final long creationTime, final long sizeInByte) {
final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName);
- if (Objects.nonNull(pipeMeta)) {
+ // Skip updates from stale pipes (created before an "alter pipe") by matching creation time
+ if (Objects.nonNull(pipeMeta) && pipeMeta.getStaticMeta().getCreationTime() == creationTime) {
((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta())
.addFloatingMemoryUsageInByte(sizeInByte);
}
}
- public void decreaseFloatingMemoryUsageInByte(final String pipeName, final long sizeInByte) {
+ public void decreaseFloatingMemoryUsageInByte(
+ final String pipeName, final long creationTime, final long sizeInByte) {
final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName);
- if (Objects.nonNull(pipeMeta)) {
+ // Skip updates from stale pipes (created before an "alter pipe") by matching creation time
+ if (Objects.nonNull(pipeMeta) && pipeMeta.getStaticMeta().getCreationTime() == creationTime) {
((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta())
.decreaseFloatingMemoryUsageInByte(sizeInByte);
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/BlockingPendingQueue.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/BlockingPendingQueue.java
index aa93b09..2fde7ff 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/BlockingPendingQueue.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/BlockingPendingQueue.java
@@ -50,7 +50,7 @@
this.eventCounter = eventCounter;
}
- public boolean waitedOffer(final E event) {
+ public synchronized boolean waitedOffer(final E event) {
checkBeforeOffer(event);
try {
final boolean offered =
@@ -69,7 +69,7 @@
}
}
- public boolean directOffer(final E event) {
+ public synchronized boolean directOffer(final E event) {
checkBeforeOffer(event);
final boolean offered = pendingQueue.offer(event);
if (offered) {
@@ -78,7 +78,7 @@
return offered;
}
- public boolean put(final E event) {
+ public synchronized boolean put(final E event) {
checkBeforeOffer(event);
try {
pendingQueue.put(event);
@@ -91,13 +91,13 @@
}
}
- public E directPoll() {
+ public synchronized E directPoll() {
final E event = pendingQueue.poll();
eventCounter.decreaseEventCount(event);
return event;
}
- public E waitedPoll() {
+ public synchronized E waitedPoll() {
E event = null;
try {
event =
@@ -112,22 +112,22 @@
return event;
}
- public E peek() {
+ public synchronized E peek() {
return pendingQueue.peek();
}
- public void clear() {
+ public synchronized void clear() {
isClosed.set(true);
pendingQueue.clear();
eventCounter.reset();
}
/** DO NOT FORGET to set eventCounter to new value after invoking this method. */
- public void forEach(final Consumer<? super E> action) {
+ public synchronized void forEach(final Consumer<? super E> action) {
pendingQueue.forEach(action);
}
- public void discardAllEvents() {
+ public synchronized void discardAllEvents() {
isClosed.set(true);
pendingQueue.removeIf(
event -> {
@@ -141,7 +141,7 @@
eventCounter.reset();
}
- public void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
+ public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final int regionId) {
pendingQueue.removeIf(
event -> {
if (event instanceof EnrichedEvent
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/UnboundedBlockingPendingQueue.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/UnboundedBlockingPendingQueue.java
index 785e89c..1172ae1 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/UnboundedBlockingPendingQueue.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/connection/UnboundedBlockingPendingQueue.java
@@ -34,7 +34,7 @@
pendingDeque = (BlockingDeque<E>) pendingQueue;
}
- public E peekLast() {
+ public synchronized E peekLast() {
return pendingDeque.peekLast();
}
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeRuntimeMeta.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeRuntimeMeta.java
index 752edae..402a601 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeRuntimeMeta.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeRuntimeMeta.java
@@ -140,6 +140,15 @@
this.isStoppedByRuntimeException.set(isStoppedByRuntimeException);
}
+ public void persistProgressIndex() {
+ // Iterate through all the task metas and persist their progress index
+ for (final PipeTaskMeta taskMeta : consensusGroupId2TaskMetaMap.values()) {
+ if (taskMeta.getProgressIndex() != null) {
+ taskMeta.persistProgressIndex();
+ }
+ }
+ }
+
public ByteBuffer serialize() throws IOException {
PublicBAOS byteArrayOutputStream = new PublicBAOS();
DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream);
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeTaskMeta.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeTaskMeta.java
index 6a4ab25..4a753c0 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeTaskMeta.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/meta/PipeTaskMeta.java
@@ -113,34 +113,31 @@
public ProgressIndex updateProgressIndex(final ProgressIndex updateIndex) {
// only pipeTaskMeta that need to updateProgressIndex will persist progress index
// isRegisterPersistTask is used to avoid multiple threads registering persist task concurrently
- if (Objects.nonNull(progressIndexPersistFile)
- && !isRegisterPersistTask.getAndSet(true)
+ if (PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()
&& this.persistProgressIndexFuture == null
- && PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()) {
+ && !isRegisterPersistTask.getAndSet(true)) {
this.persistProgressIndexFuture =
PipePeriodicalJobExecutor.submitBackgroundJob(
- () -> {
- if (PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()) {
- persistProgressIndex();
- }
- },
+ this::persistProgressIndex,
0,
PipeConfig.getInstance().getPipeProgressIndexFlushIntervalMs());
}
progressIndex.updateAndGet(
index -> index.updateToMinimumEqualOrIsAfterProgressIndex(updateIndex));
- if (Objects.nonNull(progressIndexPersistFile)
- && updateCount.incrementAndGet() - lastPersistCount.get() > checkPointGap
- && PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()) {
+
+ if (PipeConfig.getInstance().isPipeProgressIndexPersistEnabled()
+ && updateCount.incrementAndGet() - lastPersistCount.get() > checkPointGap) {
persistProgressIndex();
}
+
return progressIndex.get();
}
- private synchronized void persistProgressIndex() {
- if (lastPersistCount.get() == updateCount.get()) {
- // in case of multiple threads calling updateProgressIndex at the same time
+ public synchronized void persistProgressIndex() {
+ if (Objects.isNull(progressIndexPersistFile)
+ // in case of multiple threads calling updateProgressIndex at the same time
+ || lastPersistCount.get() == updateCount.get()) {
return;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java
index b37bd07..7056b05 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java
@@ -118,9 +118,29 @@
}
}
}
- if (committerKey == null || event.getCommitId() <= EnrichedEvent.NO_COMMIT_ID) {
+ if (committerKey == null) {
return;
}
+ if (event.hasMultipleCommitIds()) {
+ commitMultipleIds(committerKey, event);
+ } else {
+ commitSingleId(committerKey, event.getCommitId(), event);
+ }
+ }
+
+ private void commitMultipleIds(final CommitterKey committerKey, final EnrichedEvent event) {
+ for (final long commitId : event.getCommitIds()) {
+ if (commitSingleId(committerKey, commitId, event)) {
+ return;
+ }
+ }
+ }
+
+ private boolean commitSingleId(
+ final CommitterKey committerKey, final long commitId, final EnrichedEvent event) {
+ if (commitId <= EnrichedEvent.NO_COMMIT_ID) {
+ return false;
+ }
final PipeEventCommitter committer = eventCommitterMap.get(committerKey);
if (committer == null) {
@@ -142,10 +162,11 @@
Thread.currentThread().getStackTrace());
}
}
- return;
+ return false;
}
committer.commit(event);
+ return true;
}
private CommitterKey generateCommitterKey(
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java
index 9479c7a..0e59212 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java
@@ -58,6 +58,11 @@
@SuppressWarnings("java:S899")
public synchronized void commit(final EnrichedEvent event) {
+ if (event.hasMultipleCommitIds()) {
+ for (final EnrichedEvent dummyEvent : event.getDummyEventsForCommitIds()) {
+ commitQueue.offer(dummyEvent);
+ }
+ }
commitQueue.offer(event);
final int commitQueueSizeBeforeCommit = commitQueue.size();
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java
index be0c70d..7c372ad 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java
@@ -30,12 +30,6 @@
private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig();
- /////////////////////////////// Data Synchronization ///////////////////////////////
-
- public int getPipeNonForwardingEventsProgressReportInterval() {
- return COMMON_CONFIG.getPipeNonForwardingEventsProgressReportInterval();
- }
-
/////////////////////////////// File ///////////////////////////////
public String getPipeHardlinkBaseDirName() {
@@ -62,14 +56,6 @@
return COMMON_CONFIG.getPipeProgressIndexFlushIntervalMs();
}
- public String getPipeHardlinkWALDirName() {
- return COMMON_CONFIG.getPipeHardlinkWALDirName();
- }
-
- public boolean getPipeHardLinkWALEnabled() {
- return COMMON_CONFIG.getPipeHardLinkWALEnabled();
- }
-
public boolean getPipeFileReceiverFsyncEnabled() {
return COMMON_CONFIG.getPipeFileReceiverFsyncEnabled();
}
@@ -95,16 +81,44 @@
return COMMON_CONFIG.getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold();
}
- public double getPipeDataStructureWalMemoryProportion() {
- return COMMON_CONFIG.getPipeDataStructureWalMemoryProportion();
+ public double getPipeTotalFloatingMemoryProportion() {
+ return COMMON_CONFIG.getPipeTotalFloatingMemoryProportion();
}
public double getPipeDataStructureBatchMemoryProportion() {
return COMMON_CONFIG.getPipeDataStructureBatchMemoryProportion();
}
- public double getPipeTotalFloatingMemoryProportion() {
- return COMMON_CONFIG.getPipeTotalFloatingMemoryProportion();
+ public boolean isPipeEnableMemoryCheck() {
+ return COMMON_CONFIG.isPipeEnableMemoryChecked();
+ }
+
+ public long PipeInsertNodeQueueMemory() {
+ return COMMON_CONFIG.getPipeInsertNodeQueueMemory();
+ }
+
+ public long getTsFileParserMemory() {
+ return COMMON_CONFIG.getPipeTsFileParserMemory();
+ }
+
+ public long getSinkBatchMemoryInsertNode() {
+ return COMMON_CONFIG.getPipeSinkBatchMemoryInsertNode();
+ }
+
+ public long getSinkBatchMemoryTsFile() {
+ return COMMON_CONFIG.getPipeSinkBatchMemoryTsFile();
+ }
+
+ public long getSendTsFileReadBuffer() {
+ return COMMON_CONFIG.getPipeSendTsFileReadBuffer();
+ }
+
+ public double getReservedMemoryPercentage() {
+ return COMMON_CONFIG.getPipeReservedMemoryPercentage();
+ }
+
+ public long getPipeMinimumReceiverMemory() {
+ return COMMON_CONFIG.getPipeMinimumReceiverMemory();
}
/////////////////////////////// Subtask Connector ///////////////////////////////
@@ -147,6 +161,10 @@
return COMMON_CONFIG.getPipeSubtaskExecutorForcedRestartIntervalMs();
}
+ public long getPipeMaxWaitFinishTime() {
+ return COMMON_CONFIG.getPipeMaxWaitFinishTime();
+ }
+
/////////////////////////////// Extractor ///////////////////////////////
public int getPipeExtractorAssignerDisruptorRingBufferSize() {
@@ -157,10 +175,6 @@
return COMMON_CONFIG.getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes();
}
- public int getPipeExtractorMatcherCacheSize() {
- return COMMON_CONFIG.getPipeExtractorMatcherCacheSize();
- }
-
/////////////////////////////// Connector ///////////////////////////////
public int getPipeConnectorHandshakeTimeoutMs() {
@@ -339,50 +353,10 @@
return COMMON_CONFIG.getPipeMaxAllowedPendingTsFileEpochPerDataRegion();
}
- public int getPipeMaxAllowedPinnedMemTableCount() {
- return COMMON_CONFIG.getPipeMaxAllowedPinnedMemTableCount();
- }
-
public long getPipeMaxAllowedLinkedTsFileCount() {
return COMMON_CONFIG.getPipeMaxAllowedLinkedTsFileCount();
}
- public float getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() {
- return COMMON_CONFIG.getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage();
- }
-
- public long getPipeStuckRestartIntervalSeconds() {
- return COMMON_CONFIG.getPipeStuckRestartIntervalSeconds();
- }
-
- public long getPipeStuckRestartMinIntervalMs() {
- return COMMON_CONFIG.getPipeStuckRestartMinIntervalMs();
- }
-
- public boolean isPipeEpochKeepTsFileAfterStuckRestartEnabled() {
- return COMMON_CONFIG.isPipeEpochKeepTsFileAfterStuckRestartEnabled();
- }
-
- public long getPipeFlushAfterTerminateCount() {
- return COMMON_CONFIG.getPipeFlushAfterTerminateCount();
- }
-
- public long getPipeFlushAfterLastTerminateSeconds() {
- return COMMON_CONFIG.getPipeFlushAfterLastTerminateSeconds();
- }
-
- public long getPipeStorageEngineFlushTimeIntervalMs() {
- return COMMON_CONFIG.getPipeStorageEngineFlushTimeIntervalMs();
- }
-
- public int getPipeMaxAllowedRemainingInsertEventCountPerPipe() {
- return COMMON_CONFIG.getPipeMaxAllowedRemainingInsertEventCountPerPipe();
- }
-
- public int getPipeMaxAllowedTotalRemainingInsertEventCount() {
- return COMMON_CONFIG.getPipeMaxAllowedTotalRemainingInsertEventCount();
- }
-
/////////////////////////////// Logger ///////////////////////////////
public int getPipeMetaReportMaxLogNumPerRound() {
@@ -401,14 +375,6 @@
return COMMON_CONFIG.getPipeTsFilePinMaxLogIntervalRounds();
}
- public int getPipeWalPinMaxLogNumPerRound() {
- return COMMON_CONFIG.getPipeWalPinMaxLogNumPerRound();
- }
-
- public int getPipeWalPinMaxLogIntervalRounds() {
- return COMMON_CONFIG.getPipeWalPinMaxLogIntervalRounds();
- }
-
/////////////////////////////// Memory ///////////////////////////////
public boolean getPipeMemoryManagementEnabled() {
@@ -468,10 +434,6 @@
private static final Logger LOGGER = LoggerFactory.getLogger(PipeConfig.class);
public void printAllConfigs() {
- LOGGER.info(
- "PipeNonForwardingEventsProgressReportInterval: {}",
- getPipeNonForwardingEventsProgressReportInterval());
-
LOGGER.info("PipeHardlinkBaseDirName: {}", getPipeHardlinkBaseDirName());
LOGGER.info("PipeHardlinkTsFileDirName: {}", getPipeHardlinkTsFileDirName());
LOGGER.info("PipeProgressIndexPersistDirName: {}", getPipeProgressIndexPersistDirName());
@@ -479,8 +441,6 @@
LOGGER.info(
"PipeProgressIndexPersistCheckPointGap: {}", getPipeProgressIndexPersistCheckPointGap());
LOGGER.info("PipeProgressIndexFlushIntervalMs: {}", getPipeProgressIndexFlushIntervalMs());
- LOGGER.info("PipeHardlinkWALDirName: {}", getPipeHardlinkWALDirName());
- LOGGER.info("PipeHardLinkWALEnabled: {}", getPipeHardLinkWALEnabled());
LOGGER.info("PipeFileReceiverFsyncEnabled: {}", getPipeFileReceiverFsyncEnabled());
LOGGER.info("PipeDataStructureTabletRowSize: {}", getPipeDataStructureTabletRowSize());
@@ -494,6 +454,16 @@
LOGGER.info("PipeTotalFloatingMemoryProportion: {}", getPipeTotalFloatingMemoryProportion());
LOGGER.info(
+ "PipeDataStructureBatchMemoryProportion: {}", getPipeDataStructureBatchMemoryProportion());
+ LOGGER.info("IsPipeEnableMemoryCheck: {}", isPipeEnableMemoryCheck());
+ LOGGER.info("PipeTsFileParserMemory: {}", getTsFileParserMemory());
+ LOGGER.info("SinkBatchMemoryInsertNode: {}", getSinkBatchMemoryInsertNode());
+ LOGGER.info("SinkBatchMemoryTsFile: {}", getSinkBatchMemoryTsFile());
+ LOGGER.info("SendTsFileReadBuffer: {}", getSendTsFileReadBuffer());
+ LOGGER.info("PipeReservedMemoryPercentage: {}", getReservedMemoryPercentage());
+ LOGGER.info("PipeMinimumReceiverMemory: {}", getPipeMinimumReceiverMemory());
+
+ LOGGER.info(
"PipeRealTimeQueuePollTsFileThreshold: {}", getPipeRealTimeQueuePollTsFileThreshold());
LOGGER.info(
"PipeRealTimeQueuePollHistoricalTsFileThreshold: {}",
@@ -522,7 +492,6 @@
LOGGER.info(
"PipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes: {}",
getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes());
- LOGGER.info("PipeExtractorMatcherCacheSize: {}", getPipeExtractorMatcherCacheSize());
LOGGER.info("PipeConnectorHandshakeTimeoutMs: {}", getPipeConnectorHandshakeTimeoutMs());
LOGGER.info("PipeConnectorTransferTimeoutMs: {}", getPipeConnectorTransferTimeoutMs());
@@ -621,33 +590,12 @@
LOGGER.info(
"PipeMaxAllowedPendingTsFileEpochPerDataRegion: {}",
getPipeMaxAllowedPendingTsFileEpochPerDataRegion());
- LOGGER.info("PipeMaxAllowedPinnedMemTableCount: {}", getPipeMaxAllowedPinnedMemTableCount());
LOGGER.info("PipeMaxAllowedLinkedTsFileCount: {}", getPipeMaxAllowedLinkedTsFileCount());
- LOGGER.info(
- "PipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage: {}",
- getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage());
- LOGGER.info("PipeStuckRestartIntervalSeconds: {}", getPipeStuckRestartIntervalSeconds());
- LOGGER.info("PipeStuckRestartMinIntervalMs: {}", getPipeStuckRestartMinIntervalMs());
- LOGGER.info(
- "PipeEpochKeepTsFileAfterStuckRestartEnabled: {}",
- isPipeEpochKeepTsFileAfterStuckRestartEnabled());
- LOGGER.info("PipeFlushAfterTerminateCount: {}", getPipeFlushAfterTerminateCount());
- LOGGER.info("PipeFlushAfterLastTerminateSeconds: {}", getPipeFlushAfterLastTerminateSeconds());
- LOGGER.info(
- "PipeStorageEngineFlushTimeIntervalMs: {}", getPipeStorageEngineFlushTimeIntervalMs());
- LOGGER.info(
- "PipeMaxAllowedRemainingInsertEventCountPerPipe: {}",
- getPipeMaxAllowedRemainingInsertEventCountPerPipe());
- LOGGER.info(
- "PipeMaxAllowedTotalRemainingInsertEventCount: {}",
- getPipeMaxAllowedTotalRemainingInsertEventCount());
LOGGER.info("PipeMetaReportMaxLogNumPerRound: {}", getPipeMetaReportMaxLogNumPerRound());
LOGGER.info("PipeMetaReportMaxLogIntervalRounds: {}", getPipeMetaReportMaxLogIntervalRounds());
LOGGER.info("PipeTsFilePinMaxLogNumPerRound: {}", getPipeTsFilePinMaxLogNumPerRound());
LOGGER.info("PipeTsFilePinMaxLogIntervalRounds: {}", getPipeTsFilePinMaxLogIntervalRounds());
- LOGGER.info("PipeWalPinMaxLogNumPerRound: {}", getPipeWalPinMaxLogNumPerRound());
- LOGGER.info("PipeWalPinMaxLogIntervalRounds: {}", getPipeWalPinMaxLogIntervalRounds());
LOGGER.info("PipeMemoryManagementEnabled: {}", getPipeMemoryManagementEnabled());
LOGGER.info("PipeMemoryAllocateMaxRetries: {}", getPipeMemoryAllocateMaxRetries());
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java
index 7086ff7..2938783 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java
@@ -49,13 +49,6 @@
config.setPipeProgressIndexPersistDirName(
properties.getProperty(
"pipe_progress_index_persist_dir_name", config.getPipeProgressIndexPersistDirName()));
- config.setPipeHardlinkWALDirName(
- properties.getProperty("pipe_hardlink_wal_dir_name", config.getPipeHardlinkWALDirName()));
- config.setPipeHardLinkWALEnabled(
- Boolean.parseBoolean(
- properties.getProperty(
- "pipe_hardlink_wal_enabled",
- Boolean.toString(config.getPipeHardLinkWALEnabled()))));
int pipeSubtaskExecutorMaxThreadNum =
Integer.parseInt(
properties.getProperty(
@@ -147,17 +140,6 @@
properties.getProperty(
"pipe_tsfile_pin_max_log_interval_rounds",
String.valueOf(config.getPipeTsFilePinMaxLogIntervalRounds()))));
- config.setPipeWalPinMaxLogNumPerRound(
- Integer.parseInt(
- properties.getProperty(
- "pipe_wal_pin_max_log_num_per_round",
- String.valueOf(config.getPipeWalPinMaxLogNumPerRound()))));
- config.setPipeWalPinMaxLogIntervalRounds(
- Integer.parseInt(
- properties.getProperty(
- "pipe_wal_pin_max_log_interval_rounds",
- String.valueOf(config.getPipeWalPinMaxLogIntervalRounds()))));
-
config.setPipeMemoryManagementEnabled(
Boolean.parseBoolean(
properties.getProperty(
@@ -217,14 +199,7 @@
.trim()));
}
- public static void loadPipeInternalConfig(CommonConfig config, TrimProperties properties)
- throws IOException {
- config.setPipeNonForwardingEventsProgressReportInterval(
- Integer.parseInt(
- properties.getProperty(
- "pipe_non_forwarding_events_progress_report_interval",
- Integer.toString(config.getPipeNonForwardingEventsProgressReportInterval()))));
-
+ public static void loadPipeInternalConfig(CommonConfig config, TrimProperties properties) {
config.setPipeFileReceiverFsyncEnabled(
Boolean.parseBoolean(
properties.getProperty(
@@ -253,11 +228,6 @@
"pipe_data_structure_ts_file_memory_block_allocation_reject_threshold",
String.valueOf(
config.getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold()))));
- config.setPipeDataStructureWalMemoryProportion(
- Double.parseDouble(
- properties.getProperty(
- "pipe_data_structure_wal_memory_proportion",
- String.valueOf(config.getPipeDataStructureWalMemoryProportion()))));
config.setPipeDataStructureBatchMemoryProportion(
Double.parseDouble(
properties.getProperty(
@@ -269,6 +239,45 @@
"pipe_total_floating_memory_proportion",
String.valueOf(config.getPipeTotalFloatingMemoryProportion()))));
+ config.setIsPipeEnableMemoryChecked(
+ Boolean.parseBoolean(
+ properties.getProperty(
+ "pipe_enable_memory_checked", String.valueOf(config.isPipeEnableMemoryChecked()))));
+ config.setPipeInsertNodeQueueMemory(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_insert_node_queue_memory",
+ String.valueOf(config.getPipeInsertNodeQueueMemory()))));
+ config.setPipeTsFileParserMemory(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_tsfile_parser_memory", String.valueOf(config.getPipeTsFileParserMemory()))));
+ config.setPipeSinkBatchMemoryInsertNode(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_sink_batch_memory_insert_node",
+ String.valueOf(config.getPipeSinkBatchMemoryInsertNode()))));
+ config.setPipeSinkBatchMemoryTsFile(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_sink_batch_memory_ts_file",
+ String.valueOf(config.getPipeSinkBatchMemoryTsFile()))));
+ config.setPipeSendTsFileReadBuffer(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_send_tsfile_read_buffer",
+ String.valueOf(config.getPipeSendTsFileReadBuffer()))));
+ config.setPipeReservedMemoryPercentage(
+ Double.parseDouble(
+ properties.getProperty(
+ "pipe_reserved_memory_percentage",
+ String.valueOf(config.getPipeReservedMemoryPercentage()))));
+ config.setPipeMinimumReceiverMemory(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_minimum_receiver_memory",
+ String.valueOf(config.getPipeMinimumReceiverMemory()))));
+
config.setPipeRealTimeQueuePollTsFileThreshold(
Integer.parseInt(
Optional.ofNullable(
@@ -330,13 +339,6 @@
String.valueOf(
config
.getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes())))));
- config.setPipeExtractorMatcherCacheSize(
- Integer.parseInt(
- Optional.ofNullable(properties.getProperty("pipe_source_matcher_cache_size"))
- .orElse(
- properties.getProperty(
- "pipe_extractor_matcher_cache_size",
- String.valueOf(config.getPipeExtractorMatcherCacheSize())))));
config.setPipeConnectorHandshakeTimeoutMs(
Long.parseLong(
@@ -446,61 +448,11 @@
properties.getProperty(
"pipe_max_allowed_pending_tsfile_epoch_per_data_region",
String.valueOf(config.getPipeMaxAllowedPendingTsFileEpochPerDataRegion()))));
- config.setPipeMaxAllowedPinnedMemTableCount(
- Integer.parseInt(
- properties.getProperty(
- "pipe_max_allowed_pinned_memtable_count",
- String.valueOf(config.getPipeMaxAllowedPinnedMemTableCount()))));
config.setPipeMaxAllowedLinkedTsFileCount(
Long.parseLong(
properties.getProperty(
"pipe_max_allowed_linked_tsfile_count",
String.valueOf(config.getPipeMaxAllowedLinkedTsFileCount()))));
- config.setPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage(
- Float.parseFloat(
- properties.getProperty(
- "pipe_max_allowed_linked_deleted_tsfile_disk_usage_percentage",
- String.valueOf(config.getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage()))));
- config.setPipeStuckRestartIntervalSeconds(
- Long.parseLong(
- properties.getProperty(
- "pipe_stuck_restart_interval_seconds",
- String.valueOf(config.getPipeStuckRestartIntervalSeconds()))));
- config.setPipeMaxAllowedRemainingInsertEventCountPerPipe(
- Integer.parseInt(
- properties.getProperty(
- "pipe_max_allowed_remaining_insert_event_count_per_pipe",
- String.valueOf(config.getPipeMaxAllowedRemainingInsertEventCountPerPipe()))));
- config.setPipeMaxAllowedTotalRemainingInsertEventCount(
- Integer.parseInt(
- properties.getProperty(
- "pipe_max_allowed_total_remaining_insert_event_count",
- String.valueOf(config.getPipeMaxAllowedTotalRemainingInsertEventCount()))));
- config.setPipeStuckRestartMinIntervalMs(
- Long.parseLong(
- properties.getProperty(
- "pipe_stuck_restart_min_interval_ms",
- String.valueOf(config.getPipeStuckRestartMinIntervalMs()))));
- config.setPipeFlushAfterLastTerminateSeconds(
- Long.parseLong(
- properties.getProperty(
- "pipe_flush_after_last_terminate_seconds",
- String.valueOf(config.getPipeFlushAfterLastTerminateSeconds()))));
- config.setPipeFlushAfterTerminateCount(
- Long.parseLong(
- properties.getProperty(
- "pipe_flush_after_terminate_count",
- String.valueOf(config.getPipeFlushAfterTerminateCount()))));
- config.setPipeEpochKeepTsFileAfterStuckRestartEnabled(
- Boolean.parseBoolean(
- properties.getProperty(
- "pipe_epoch_keep_tsfile_after_stuck_restart_enabled",
- String.valueOf(config.isPipeEpochKeepTsFileAfterStuckRestartEnabled()))));
- config.setPipeStorageEngineFlushTimeIntervalMs(
- Long.parseLong(
- properties.getProperty(
- "pipe_storage_engine_flush_time_interval_ms",
- String.valueOf(config.getPipeStorageEngineFlushTimeIntervalMs()))));
config.setPipeMemoryAllocateMaxRetries(
Integer.parseInt(
@@ -591,6 +543,11 @@
"pipe_threshold_allocation_strategy_high_usage_threshold",
String.valueOf(
config.getPipeThresholdAllocationStrategyFixedMemoryHighUsageThreshold()))));
+
+ config.setPipeMaxWaitFinishTime(
+ Long.parseLong(
+ properties.getProperty(
+ "pipe_max_wait_finish_time", String.valueOf(config.getPipeMaxWaitFinishTime()))));
}
public static void loadPipeExternalConfig(
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java
index 394ff7a..c678093 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java
@@ -75,7 +75,7 @@
public static final String CONNECTOR_IOTDB_BATCH_SIZE_KEY = "connector.batch.size-bytes";
public static final String SINK_IOTDB_BATCH_SIZE_KEY = "sink.batch.size-bytes";
public static final long CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE = MB;
- public static final long CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE = 2 * MB;
+ public static final long CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE = MB;
public static final String CONNECTOR_IOTDB_USER_KEY = "connector.user";
public static final String SINK_IOTDB_USER_KEY = "sink.user";
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java
index 909d43b..c61abbe 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java
@@ -82,7 +82,6 @@
version = transferReq.version;
type = transferReq.type;
- body = transferReq.body;
return this;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java
index 0bc0342..157d73e 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java
@@ -71,7 +71,6 @@
version = req.version;
type = req.type;
- body = req.body;
return this;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java
index 3ed9999..86cce02 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java
@@ -109,7 +109,6 @@
version = req.version;
type = req.type;
- body = req.body;
return this;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java
index 7c0330a..d4fb192 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java
@@ -62,7 +62,6 @@
version = transferReq.version;
type = transferReq.type;
- body = transferReq.body;
return this;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java
index 8ed63cd..c6fd0f9 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java
@@ -74,7 +74,6 @@
version = transferReq.version;
type = transferReq.type;
- body = transferReq.body;
return this;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java
index 4df6008..1041cb2 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java
@@ -128,7 +128,6 @@
sliceReq.version = transferReq.version;
sliceReq.type = transferReq.type;
- sliceReq.body = transferReq.body;
return sliceReq;
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java
index a9dae77..374bc54 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java
@@ -32,6 +32,7 @@
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -395,10 +396,22 @@
return committerKey;
}
+ public boolean hasMultipleCommitIds() {
+ return false;
+ }
+
public long getCommitId() {
return commitId;
}
+ public List<EnrichedEvent> getDummyEventsForCommitIds() {
+ return Collections.emptyList();
+ }
+
+ public List<Long> getCommitIds() {
+ return Collections.singletonList(commitId);
+ }
+
public void onCommitted() {
onCommittedHooks.forEach(Supplier::get);
}
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index 145b4ad..1b177ee 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -156,12 +156,8 @@
PIPE_CONNECTOR_TSFILE_TRANSFER("pipe_connector_tsfile_transfer"),
PIPE_CONNECTOR_HEARTBEAT_TRANSFER("pipe_connector_heartbeat_transfer"),
PIPE_HEARTBEAT_EVENT("pipe_heartbeat_event"),
- PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE("pipe_wal_insert_node_cache_hit_rate"),
- PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT("pipe_wal_insert_node_cache_hit_count"),
- PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT("pipe_wal_insert_node_cache_request_count"),
PIPE_EXTRACTOR_TSFILE_EPOCH_STATE("pipe_extractor_tsfile_epoch_state"),
PIPE_MEM("pipe_mem"),
- PIPE_PINNED_MEMTABLE_COUNT("pipe_pinned_memtable_count"),
PIPE_LINKED_TSFILE_COUNT("pipe_linked_tsfile_count"),
PIPE_LINKED_TSFILE_SIZE("pipe_linked_tsfile_size"),
PIPE_PHANTOM_REFERENCE_COUNT("pipe_phantom_reference_count"),
@@ -177,6 +173,7 @@
PIPE_INSERT_NODE_EVENT_TRANSFER_TIME("pipe_insert_node_event_transfer_time"),
PIPE_TSFILE_EVENT_TRANSFER_TIME("pipe_tsfile_event_transfer_time"),
PIPE_DATANODE_EVENT_TRANSFER("pipe_datanode_event_transfer"),
+ PIPE_FLOATING_MEMORY_USAGE("pipe_floating_memory_usage"),
PIPE_CONFIG_LINKED_QUEUE_SIZE("pipe_config_linked_queue_size"),
UNTRANSFERRED_CONFIG_COUNT("untransferred_config_count"),
PIPE_CONNECTOR_CONFIG_TRANSFER("pipe_connector_config_transfer"),
diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java
index 7597b08..04577f4 100644
--- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java
+++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/FileUtils.java
@@ -114,6 +114,23 @@
}
}
+ public static boolean deleteFileChildrenQuietly(final File file) {
+ if (!file.exists() || !file.isDirectory()) {
+ return false;
+ }
+
+ boolean result = true;
+ final File[] items = file.listFiles();
+ if (items != null) {
+ for (final File item : items) {
+ if (item.isFile()) {
+ result &= org.apache.commons.io.FileUtils.deleteQuietly(item);
+ }
+ }
+ }
+ return result;
+ }
+
public static void deleteDirectoryAndEmptyParent(File folder) {
deleteFileOrDirectory(folder);
final File parentFolder = folder.getParentFile();