Remove implied profanity from error messages. (#10270)
e.g., "WTF" and "WTH".
diff --git a/core/src/main/java/org/apache/druid/guice/ConfigProvider.java b/core/src/main/java/org/apache/druid/guice/ConfigProvider.java
index a6ca07b..538f2a6 100644
--- a/core/src/main/java/org/apache/druid/guice/ConfigProvider.java
+++ b/core/src/main/java/org/apache/druid/guice/ConfigProvider.java
@@ -29,6 +29,7 @@
import java.util.Map;
/**
+ *
*/
public class ConfigProvider<T> implements Provider<T>
{
@@ -79,7 +80,7 @@
{
try {
// ConfigMagic handles a null replacements
- Preconditions.checkNotNull(factory, "WTF!? Code misconfigured, inject() didn't get called.");
+ Preconditions.checkNotNull(factory, "Code misconfigured, inject() didn't get called.");
return factory.buildWithReplacements(clazz, replacements);
}
catch (IllegalArgumentException e) {
diff --git a/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java b/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
index f8e5d83..6d4de93 100644
--- a/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
+++ b/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
@@ -205,7 +205,7 @@
bytesWritten += bytesWrittenInChunk;
if (bytesWritten != currOut.getCurrOffset() - startOffset) {
- throw new ISE("WTF? Perhaps there is some concurrent modification going on?");
+ throw new ISE("Unexpected byte count mismatch; perhaps there is some concurrent modification going on?");
}
if (bytesWritten > size) {
throw new ISE("Wrote[%,d] bytes for something of size[%,d]. Liar!!!", bytesWritten, size);
@@ -228,7 +228,7 @@
writerCurrentlyInUse = false;
if (bytesWritten != currOut.getCurrOffset() - startOffset) {
- throw new ISE("WTF? Perhaps there is some concurrent modification going on?");
+ throw new ISE("Unexpected byte count mismatch; perhaps there is some concurrent modification going on?");
}
if (bytesWritten != size) {
throw new IOE("Expected [%,d] bytes, only saw [%,d], potential corruption?", size, bytesWritten);
diff --git a/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java b/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java
index bd1cf9d..8306dca 100644
--- a/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java
+++ b/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java
@@ -162,16 +162,6 @@
log.error("ASSERTION_ERROR: " + message, formatArgs);
}
- public void wtf(String message, Object... formatArgs)
- {
- error(message, formatArgs);
- }
-
- public void wtf(Throwable t, String message, Object... formatArgs)
- {
- error(t, message, formatArgs);
- }
-
public void debugSegments(@Nullable final Collection<DataSegment> segments, @Nullable String preamble)
{
if (log.isDebugEnabled()) {
diff --git a/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java b/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java
index 40570f0..8129796 100644
--- a/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java
+++ b/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java
@@ -224,7 +224,7 @@
deficit--;
poolVal = null;
} else {
- throw new IllegalStateException("WTF?! No objects left, and no object deficit. This is probably a bug.");
+ throw new IllegalStateException("Unexpected state: No objects left, and no object deficit");
}
}
diff --git a/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java b/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java
index c3247d6..e395a71 100644
--- a/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java
+++ b/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java
@@ -151,7 +151,7 @@
}
catch (IOException e) {
// This should never happen
- log.wtf(e, "The empty stream threw an IOException");
+ log.error(e, "The empty stream threw an IOException");
throw new RuntimeException(e);
}
finally {
diff --git a/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java b/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java
index a4529f3..8d010cf 100644
--- a/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java
+++ b/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java
@@ -287,6 +287,7 @@
* @param minorVersion the minor version to check overshadow relation. The found groups will have lower minor versions
* than this.
* @param fromState the state to search for overshadowed groups.
+ *
* @return a list of found atomicUpdateGroups. It could be empty if no groups are found.
*/
@VisibleForTesting
@@ -333,6 +334,7 @@
* @param minorVersion the minor version to check overshadow relation. The found groups will have higher minor
* versions than this.
* @param fromState the state to search for overshadowed groups.
+ *
* @return a list of found atomicUpdateGroups. It could be empty if no groups are found.
*/
@VisibleForTesting
@@ -438,9 +440,9 @@
* The given standby group can be visible in the below two cases:
*
* - The standby group is full. Since every standby group has a higher version than the current visible group,
- * it should become visible immediately when it's full.
+ * it should become visible immediately when it's full.
* - The standby group is not full but not empty and the current visible is not full. If there's no fully available
- * group, the group of the highest version should be the visible.
+ * group, the group of the highest version should be the visible.
*/
private void moveNewStandbyToVisibleIfNecessary(AtomicUpdateGroup<T> standbyGroup, State stateOfGroup)
{
@@ -530,7 +532,7 @@
findOvershadows(group, State.STANDBY)
);
if (overshadowingStandbys.isEmpty()) {
- throw new ISE("WTH? atomicUpdateGroup[%s] is in overshadowed state, but no one overshadows it?", group);
+ throw new ISE("Unexpected state: atomicUpdateGroup[%s] is overshadowed, but nothing overshadows it", group);
}
groupsOvershadowingAug = overshadowingStandbys;
isOvershadowingGroupsFull = false;
@@ -585,6 +587,7 @@
* @param groups atomicUpdateGroups sorted by their rootPartitionRange
* @param startRootPartitionId the start partitionId of the root partition range to check the coverage
* @param endRootPartitionId the end partitionId of the root partition range to check the coverage
+ *
* @return true if the given groups fully cover the given partition range.
*/
private boolean doGroupsFullyCoverPartitionRange(
@@ -675,7 +678,7 @@
// If this chunk is already in the atomicUpdateGroup, it should be in knownPartitionChunks
// and this code must not be executed.
throw new ISE(
- "WTH? chunk[%s] is in the atomicUpdateGroup[%s] but not in knownPartitionChunks[%s]?",
+ "Unexpected state: chunk[%s] is in the atomicUpdateGroup[%s] but not in knownPartitionChunks[%s]",
chunk,
atomicUpdateGroup,
knownPartitionChunks
@@ -875,7 +878,7 @@
if (!removed.equals(aug)) {
throw new ISE(
- "WTH? actually removed atomicUpdateGroup[%s] is different from the one which is supposed to be[%s]",
+ "Unexpected state: Removed atomicUpdateGroup[%s] is different from expected atomicUpdateGroup[%s]",
removed,
aug
);
@@ -896,7 +899,7 @@
if (!knownChunk.equals(partitionChunk)) {
throw new ISE(
- "WTH? Same partitionId[%d], but known partition[%s] is different from the input partition[%s]",
+ "Unexpected state: Same partitionId[%d], but known partition[%s] is different from the input partition[%s]",
partitionChunk.getChunkNumber(),
knownChunk,
partitionChunk
@@ -932,7 +935,8 @@
(SingleEntryShort2ObjectSortedMap<AtomicUpdateGroup<T>>) map;
//noinspection ConstantConditions
return singleMap.val.isFull();
- });
+ }
+ );
}
@Nullable
diff --git a/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java b/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java
index 16a34f1..bd34967 100644
--- a/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java
+++ b/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java
@@ -101,7 +101,7 @@
{
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("thingToValidate cannot contain whitespace character except space.");
- IdUtils.validateId(THINGO, "wtf\u000Bis line tabulation");
+ IdUtils.validateId(THINGO, "what\u000Bis line tabulation");
}
@Test
diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java
index a024180..6b4bbd5 100644
--- a/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java
+++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java
@@ -107,7 +107,7 @@
public void emit(Event event)
{
if (!started.get()) {
- throw new ISE("WTF emit was called while service is not started yet");
+ throw new ISE("Emit called unexpectedly before service start");
}
if (event instanceof ServiceMetricEvent) {
final TimelineMetric timelineEvent = timelineMetricConverter.druidEventToTimelineMetric((ServiceMetricEvent) event);
diff --git a/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java b/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java
index 13ffb48..b3739ab 100644
--- a/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java
+++ b/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java
@@ -99,7 +99,7 @@
public void emit(Event event)
{
if (!started.get()) {
- throw new ISE("WTF emit was called while service is not started yet");
+ throw new ISE("Emit called unexpectedly before service start");
}
if (event instanceof ServiceMetricEvent) {
final GraphiteEvent graphiteEvent = graphiteEventConverter.druidEventToGraphite((ServiceMetricEvent) event);
@@ -152,14 +152,14 @@
{
if (graphiteEmitterConfig.getProtocol().equals(GraphiteEmitterConfig.PLAINTEXT_PROTOCOL)) {
graphite = new Graphite(
- graphiteEmitterConfig.getHostname(),
- graphiteEmitterConfig.getPort()
+ graphiteEmitterConfig.getHostname(),
+ graphiteEmitterConfig.getPort()
);
} else {
graphite = new PickledGraphite(
- graphiteEmitterConfig.getHostname(),
- graphiteEmitterConfig.getPort(),
- graphiteEmitterConfig.getBatchSize()
+ graphiteEmitterConfig.getHostname(),
+ graphiteEmitterConfig.getPort(),
+ graphiteEmitterConfig.getBatchSize()
);
}
log.info("Using %s protocol.", graphiteEmitterConfig.getProtocol());
diff --git a/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java b/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java
index 41c2413..67b95eb 100644
--- a/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java
+++ b/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java
@@ -66,7 +66,7 @@
public void emit(Event event)
{
if (!started.get()) {
- throw new ISE("WTF emit was called while service is not started yet");
+ throw new ISE("Emit called unexpectedly before service start");
}
if (event instanceof ServiceMetricEvent) {
OpentsdbEvent opentsdbEvent = converter.convert((ServiceMetricEvent) event);
diff --git a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java
index 58f4984..5ccd04b 100644
--- a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java
+++ b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java
@@ -206,7 +206,7 @@
}
}
catch (Exception e) {
- LOG.makeAlert(e, "WTF? Could not deserialize user/role map received from coordinator.").emit();
+ LOG.makeAlert(e, "Could not deserialize user/role map received from coordinator").emit();
}
}
diff --git a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java
index 7dd8aba..1210e1c 100644
--- a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java
+++ b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java
@@ -39,7 +39,7 @@
ByteBuffer other = selector.getObject();
if (other == null) {
// nulls should be empty bloom filters by this point, so encountering a nil column in merge agg is unexpected
- throw new ISE("WTF?! Unexpected null value in BloomFilterMergeAggregator");
+ throw new ISE("Unexpected null value in BloomFilterMergeAggregator");
}
BloomKFilter.mergeBloomFilterByteBuffers(buf, buf.position(), other, other.position());
}
diff --git a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java
index ed5ce29..7d74432 100644
--- a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java
+++ b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java
@@ -76,7 +76,7 @@
final BaseNullableColumnValueSelector selector = metricFactory.makeColumnValueSelector(fieldName);
// null columns should be empty bloom filters by this point, so encountering a nil column in merge agg is unexpected
if (selector instanceof NilColumnValueSelector) {
- throw new ISE("WTF?! Unexpected NilColumnValueSelector");
+ throw new ISE("Unexpected NilColumnValueSelector");
}
return new BloomFilterMergeAggregator((ColumnValueSelector<ByteBuffer>) selector, getMaxNumEntries(), true);
}
diff --git a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java
index abc63c8..b72461d 100644
--- a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java
+++ b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java
@@ -2594,10 +2594,10 @@
}
Assert.assertTrue(
- serviceEmitter.getStackTrace().startsWith("org.apache.druid.java.util.common.ISE: WTH?! cannot find")
+ serviceEmitter.getStackTrace().startsWith("org.apache.druid.java.util.common.ISE: Cannot find")
);
Assert.assertEquals(
- "WTH?! cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]",
+ "Cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]",
serviceEmitter.getExceptionMessage()
);
Assert.assertEquals(ISE.class, serviceEmitter.getExceptionClass());
diff --git a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java
index 65ed7d1..7ca3b04 100644
--- a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java
+++ b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java
@@ -3194,9 +3194,9 @@
}
Assert.assertTrue(serviceEmitter.getStackTrace()
- .startsWith("org.apache.druid.java.util.common.ISE: WTH?! cannot find"));
+ .startsWith("org.apache.druid.java.util.common.ISE: Cannot find"));
Assert.assertEquals(
- "WTH?! cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]",
+ "Cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]",
serviceEmitter.getExceptionMessage()
);
Assert.assertEquals(ISE.class, serviceEmitter.getExceptionClass());
diff --git a/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java b/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java
index 10ccc9b..0b1b14c 100644
--- a/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java
+++ b/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java
@@ -119,7 +119,7 @@
}
final CacheRefKeeper cacheRefKeeper = refOfCacheKeeper.get();
if (cacheRefKeeper == null) {
- throw new ISE("Cache reference is null WTF");
+ throw new ISE("Cache reference is null");
}
final PollingCache cache = cacheRefKeeper.getAndIncrementRef();
try {
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
index 62a13d3..b390619 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
@@ -239,7 +239,8 @@
Map<String, Object> metrics = TaskMetricsUtils.makeIngestionRowMetrics(
jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).getValue(),
- jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER).getValue(),
+ jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER)
+ .getValue(),
jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER).getValue(),
jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_THROWN_AWAY_COUNTER).getValue()
);
@@ -318,7 +319,7 @@
.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch()));
if (!maybeInterval.isPresent()) {
- throw new ISE("WTF?! No bucket found for timestamp: %s", inputRow.getTimestampFromEpoch());
+ throw new ISE("No bucket found for timestamp: %s", inputRow.getTimestampFromEpoch());
}
interval = maybeInterval.get();
}
@@ -387,7 +388,7 @@
Optional<Interval> intervalOptional = config.getGranularitySpec().bucketInterval(DateTimes.utc(key.get()));
if (!intervalOptional.isPresent()) {
- throw new ISE("WTF?! No bucket found for timestamp: %s", key.get());
+ throw new ISE("No bucket found for timestamp: %s", key.get());
}
interval = intervalOptional.get();
}
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
index 0b246be..1e810c6 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
@@ -464,7 +464,7 @@
final Optional<Interval> maybeInterval = config.getGranularitySpec().bucketInterval(timestamp);
if (!maybeInterval.isPresent()) {
- throw new ISE("WTF?! No bucket found for timestamp: %s", timestamp);
+ throw new ISE("No bucket found for timestamp: %s", timestamp);
}
final Interval interval = maybeInterval.get();
@@ -627,7 +627,7 @@
final long totalRows = firstDvc.numRows;
if (!"".equals(firstDvc.dim) || !"".equals(firstDvc.value)) {
- throw new IllegalStateException("WTF?! Expected total row indicator on first k/v pair!");
+ throw new IllegalStateException("Expected total row indicator on first k/v pair");
}
// "iterator" will now take us over many candidate dimensions
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
index 72bbaee..e2acce7 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
@@ -355,7 +355,7 @@
final Optional<Bucket> bucket = getConfig().getBucket(inputRow);
if (!bucket.isPresent()) {
- throw new ISE("WTF?! No bucket found for row: %s", inputRow);
+ throw new ISE("No bucket found for row: %s", inputRow);
}
final long truncatedTimestamp = granularitySpec.getQueryGranularity()
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java
index 2bb13d9..586abe0 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java
@@ -68,6 +68,7 @@
import org.apache.druid.query.NoopQueryRunner;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryRunner;
+import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.segment.indexing.DataSchema;
import org.apache.druid.segment.indexing.RealtimeIOConfig;
import org.apache.druid.segment.realtime.FireDepartment;
@@ -318,7 +319,10 @@
final TransactionalSegmentPublisher publisher = (mustBeNullOrEmptySegments, segments, commitMetadata) -> {
if (mustBeNullOrEmptySegments != null && !mustBeNullOrEmptySegments.isEmpty()) {
- throw new ISE("WTH? stream ingestion tasks are overwriting segments[%s]", mustBeNullOrEmptySegments);
+ throw new ISE(
+ "Stream ingestion task unexpectedly attempted to overwrite segments: %s",
+ SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptySegments)
+ );
}
final SegmentTransactionalInsertAction action = SegmentTransactionalInsertAction.appendAction(
segments,
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java
index 9f65099..4e048ad 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java
@@ -73,7 +73,7 @@
for (final DataSegment unusedSegment : unusedSegments) {
if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
throw new ISE(
- "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
+ "Unused segment[%s] has version[%s] > task version[%s]",
unusedSegment.getId(),
unusedSegment.getVersion(),
myLock.getVersion()
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java
index 4a0b3ac..152eb330 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java
@@ -81,7 +81,7 @@
for (final DataSegment unusedSegment : unusedSegments) {
if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
throw new ISE(
- "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
+ "Unused segment[%s] has version[%s] > task version[%s]",
unusedSegment.getId(),
unusedSegment.getVersion(),
myLock.getVersion()
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java
index 79aa396..b4c7520 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java
@@ -212,7 +212,7 @@
runThread = Thread.currentThread();
if (this.plumber != null) {
- throw new IllegalStateException("WTF?!? run with non-null plumber??!");
+ throw new IllegalStateException("Plumber must be null (was run() called with a plumber already set?)");
}
setupTimeoutAlert();
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java
index 2978859..622c9ff 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java
@@ -74,7 +74,7 @@
for (final DataSegment unusedSegment : unusedSegments) {
if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
throw new ISE(
- "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
+ "Unused segment[%s] has version[%s] > task version[%s]",
unusedSegment.getId(),
unusedSegment.getVersion(),
myLock.getVersion()
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java
index 972a7e4..099b24a 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java
@@ -146,7 +146,7 @@
final String mustBeNull = intervalToVersion.put(lock.getInterval(), lock.getVersion());
if (mustBeNull != null) {
throw new ISE(
- "WTH? Two versions([%s], [%s]) for the same interval[%s]?",
+ "Unexpected state: Two versions([%s], [%s]) for the same interval[%s]",
lock.getVersion(),
mustBeNull,
lock.getInterval()
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java
index 30dcec2..9fa8a28 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java
@@ -95,7 +95,7 @@
final Task task = jsonMapper.readValue(taskFile, Task.class);
if (!task.getId().equals(taskId)) {
- throw new ISE("WTF?! Task[%s] restore file had wrong id[%s].", taskId, task.getId());
+ throw new ISE("Task[%s] restore file had wrong id[%s]", taskId, task.getId());
}
if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) {
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java
index c8524c7..f29af34 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java
@@ -174,7 +174,7 @@
final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(task.getId());
if (taskWorkItem == null) {
- LOGGER.makeAlert("WTF?! TaskInfo disappeared!").addData("task", task.getId()).emit();
+ LOGGER.makeAlert("TaskInfo disappeared!").addData("task", task.getId()).emit();
throw new ISE("TaskInfo disappeared for task[%s]!", task.getId());
}
@@ -183,7 +183,7 @@
}
if (taskWorkItem.processHolder != null) {
- LOGGER.makeAlert("WTF?! TaskInfo already has a processHolder")
+ LOGGER.makeAlert("TaskInfo already has a processHolder")
.addData("task", task.getId())
.emit();
throw new ISE("TaskInfo already has processHolder for task[%s]!", task.getId());
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java
index dbaadf9..352f75f 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java
@@ -770,7 +770,7 @@
final RemoteTaskRunnerWorkItem removed = completeTasks.remove(taskId);
final Worker worker;
if (removed == null || (worker = removed.getWorker()) == null) {
- log.makeAlert("WTF?! Asked to cleanup nonexistent task")
+ log.makeAlert("Asked to cleanup nonexistent task")
.addData("taskId", taskId)
.emit();
} else {
@@ -901,7 +901,7 @@
RemoteTaskRunnerWorkItem workItem = pendingTasks.remove(task.getId());
if (workItem == null) {
- log.makeAlert("WTF?! Got a null work item from pending tasks?! How can this be?!")
+ log.makeAlert("Ignoring null work item from pending task queue")
.addData("taskId", task.getId())
.emit();
return false;
@@ -1119,7 +1119,7 @@
zkWorker.setWorker(worker);
} else {
log.warn(
- "WTF, worker[%s] updated its announcement but we didn't have a ZkWorker for it. Ignoring.",
+ "Worker[%s] updated its announcement but we didn't have a ZkWorker for it. Ignoring.",
worker.getHost()
);
}
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java
index 7e7fafa..13bd490 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java
@@ -171,7 +171,7 @@
executorService.shutdown();
}
catch (SecurityException ex) {
- log.wtf(ex, "I can't control my own threads!");
+ log.error(ex, "I can't control my own threads!");
}
}
@@ -233,7 +233,7 @@
executorService.shutdownNow();
}
catch (SecurityException ex) {
- log.wtf(ex, "I can't control my own threads!");
+ log.error(ex, "I can't control my own threads!");
}
}
}
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java
index 6a56169..3dce591 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java
@@ -146,7 +146,7 @@
final TaskLock savedTaskLock = Preconditions.checkNotNull(taskAndLock.rhs, "savedTaskLock");
if (savedTaskLock.getInterval().toDurationMillis() <= 0) {
// "Impossible", but you never know what crazy stuff can be restored from storage.
- log.warn("WTF?! Got lock[%s] with empty interval for task: %s", savedTaskLock, task.getId());
+ log.warn("Ignoring lock[%s] with empty interval for task: %s", savedTaskLock, task.getId());
continue;
}
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java
index 541786d..82cf9bc 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java
@@ -176,7 +176,7 @@
taskWorkItem = tasks.get(task.getId());
if (taskWorkItem == null) {
- LOGGER.makeAlert("WTF?! TaskInfo disappeared!").addData("task", task.getId()).emit();
+ LOGGER.makeAlert("TaskInfo disappeared").addData("task", task.getId()).emit();
throw new ISE("TaskInfo disappeared for task[%s]!", task.getId());
}
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java
index da83b96..d8a7cb6 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java
@@ -487,7 +487,7 @@
long workerDiscoveryStartTime = System.currentTimeMillis();
while (!workerViewInitialized.await(30, TimeUnit.SECONDS)) {
if (System.currentTimeMillis() - workerDiscoveryStartTime > TimeUnit.MINUTES.toMillis(5)) {
- throw new ISE("WTF! Couldn't discover workers.");
+ throw new ISE("Couldn't discover workers.");
} else {
log.info("Waiting for worker discovery...");
}
@@ -1169,7 +1169,7 @@
}
if (immutableWorker == null) {
- throw new ISE("WTH! NULL immutableWorker");
+ throw new ISE("Unexpected state: null immutableWorker");
}
try {
@@ -1405,7 +1405,7 @@
break;
default:
log.makeAlert(
- "WTF! Found unrecognized state[%s] of task[%s] in taskStorage. Notification[%s] from worker[%s] is ignored.",
+ "Found unrecognized state[%s] of task[%s] in taskStorage. Notification[%s] from worker[%s] is ignored.",
knownStatusInStorage.get().getStatusCode(),
taskId,
announcement,
@@ -1468,7 +1468,7 @@
break;
default:
log.makeAlert(
- "WTF! Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.",
+ "Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.",
taskItem.getState(),
taskId,
announcement,
@@ -1513,7 +1513,7 @@
break;
default:
log.makeAlert(
- "WTF! Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.",
+ "Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.",
taskItem.getState(),
taskId,
announcement,
@@ -1523,7 +1523,7 @@
break;
default:
log.makeAlert(
- "WTF! Worker[%s] reported unrecognized state[%s] for task[%s].",
+ "Worker[%s] reported unrecognized state[%s] for task[%s].",
worker.getHost(),
announcement.getTaskStatus().getStatusCode(),
taskId
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
index 4f6eaad..c62584f 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java
@@ -470,7 +470,7 @@
// Sanity checks.
if (!restoredNextPartitions.getStream().equals(ioConfig.getStartSequenceNumbers().getStream())) {
throw new ISE(
- "WTF?! Restored stream[%s] but expected stream[%s]",
+ "Restored stream[%s] but expected stream[%s]",
restoredNextPartitions.getStream(),
ioConfig.getStartSequenceNumbers().getStream()
);
@@ -478,7 +478,7 @@
if (!currOffsets.keySet().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet())) {
throw new ISE(
- "WTF?! Restored partitions[%s] but expected partitions[%s]",
+ "Restored partitions[%s] but expected partitions[%s]",
currOffsets.keySet(),
ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet()
);
@@ -633,7 +633,7 @@
if (sequenceToUse == null) {
throw new ISE(
- "WTH?! cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s",
+ "Cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s",
record.getPartitionId(),
record.getSequenceNumber(),
sequences
@@ -1616,7 +1616,7 @@
pauseLock.lockInterruptibly();
// Perform all sequence related checks before checking for isPaused()
// and after acquiring pauseLock to correctly guard against duplicate requests
- Preconditions.checkState(sequenceNumbers.size() > 0, "WTH?! No Sequences found to set end sequences");
+ Preconditions.checkState(sequenceNumbers.size() > 0, "No sequences found to set end sequences");
final SequenceMetadata<PartitionIdType, SequenceOffsetType> latestSequence = getLastSequenceMetadata();
final Set<PartitionIdType> exclusiveStartPartitions;
@@ -1641,7 +1641,7 @@
} else if (latestSequence.isCheckpointed()) {
return Response.status(Response.Status.BAD_REQUEST)
.entity(StringUtils.format(
- "WTH?! Sequence [%s] has already endOffsets set, cannot set to [%s]",
+ "Sequence [%s] has already endOffsets set, cannot set to [%s]",
latestSequence,
sequenceNumbers
)).build();
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java
index 4b0265f..af92620 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java
@@ -32,6 +32,7 @@
import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.emitter.EmittingLogger;
+import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.segment.realtime.appenderator.TransactionalSegmentPublisher;
import org.apache.druid.timeline.DataSegment;
@@ -341,7 +342,10 @@
) throws IOException
{
if (mustBeNullOrEmptySegments != null && !mustBeNullOrEmptySegments.isEmpty()) {
- throw new ISE("WTH? stream ingestion tasks are overwriting segments[%s]", mustBeNullOrEmptySegments);
+ throw new ISE(
+ "Stream ingestion task unexpectedly attempted to overwrite segments: %s",
+ SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptySegments)
+ );
}
final Map commitMetaMap = (Map) Preconditions.checkNotNull(commitMetadata, "commitMetadata");
final SeekableStreamEndSequenceNumbers<PartitionIdType, SequenceOffsetType> finalPartitions =
@@ -353,7 +357,7 @@
// Sanity check, we should only be publishing things that match our desired end state.
if (!getEndOffsets().equals(finalPartitions.getPartitionSequenceNumberMap())) {
throw new ISE(
- "WTF?! Driver for sequence [%s], attempted to publish invalid metadata[%s].",
+ "Driver for sequence[%s] attempted to publish invalid metadata[%s].",
SequenceMetadata.this.toString(),
commitMetadata
);
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
index 4587dcf..2fdf85b 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java
@@ -423,7 +423,7 @@
log.warn("Ignoring checkpoint request because taskGroup[%d] is inactive", taskGroupId);
return false;
} else {
- throw new ISE("WTH?! cannot find taskGroup [%s] among all activelyReadingTaskGroups [%s]", taskGroupId,
+ throw new ISE("Cannot find taskGroup [%s] among all activelyReadingTaskGroups [%s]", taskGroupId,
activelyReadingTaskGroups
);
}
@@ -1494,7 +1494,7 @@
final TaskData prevTaskData = taskGroup.tasks.putIfAbsent(taskId, new TaskData());
if (prevTaskData != null) {
throw new ISE(
- "WTH? a taskGroup[%s] already exists for new task[%s]",
+ "taskGroup[%s] already exists for new task[%s]",
prevTaskData,
taskId
);
@@ -2518,7 +2518,7 @@
// The below get should throw ExecutionException since result is null.
final Map<PartitionIdType, SequenceOffsetType> pauseResult = pauseFutures.get(i).get();
throw new ISE(
- "WTH? The pause request for task [%s] is supposed to fail, but returned [%s]",
+ "Pause request for task [%s] should have failed, but returned [%s]",
taskId,
pauseResult
);
@@ -2674,7 +2674,7 @@
final String taskId = entry.getKey();
final TaskData taskData = entry.getValue();
- Preconditions.checkNotNull(taskData.status, "WTH? task[%s] has a null status", taskId);
+ Preconditions.checkNotNull(taskData.status, "Task[%s] has null status", taskId);
if (taskData.status.isFailure()) {
stateManager.recordCompletedTaskState(TaskState.FAILED);
@@ -2774,7 +2774,7 @@
continue;
}
- Preconditions.checkNotNull(taskData.status, "WTH? task[%s] has a null status", taskId);
+ Preconditions.checkNotNull(taskData.status, "Task[%s] has null status", taskId);
// remove failed tasks
if (taskData.status.isFailure()) {
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java b/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java
index 6855d08..3f49776 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java
@@ -345,7 +345,7 @@
if (taskId.equals(task.getId())) {
assignedTasks.put(taskId, task);
} else {
- throw new ISE("WTF! Corrupted assigned task on disk[%s].", taskFile.getAbsoluteFile());
+ throw new ISE("Corrupted assigned task on disk[%s].", taskFile.getAbsoluteFile());
}
}
catch (IOException ex) {
@@ -471,7 +471,7 @@
if (taskId.equals(taskAnnouncement.getTaskId())) {
completedTasks.put(taskId, taskAnnouncement);
} else {
- throw new ISE("WTF! Corrupted completed task on disk[%s].", taskFile.getAbsoluteFile());
+ throw new ISE("Corrupted completed task on disk[%s].", taskFile.getAbsoluteFile());
}
}
catch (IOException ex) {
@@ -699,7 +699,7 @@
if (!status.isComplete()) {
log.warn(
- "WTF?! Got status notice for task [%s] that isn't complete (status = [%s])...",
+ "Got status notice for task [%s] that isn't complete (status = [%s])...",
task.getId(),
status.getStatusCode()
);
diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java
index b8a2220..06ebc56 100644
--- a/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java
+++ b/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java
@@ -331,7 +331,7 @@
if (intervals.equals(ImmutableList.of(testCase.interval))) {
return ImmutableSet.copyOf(testCase.segments);
} else {
- throw new IllegalArgumentException("WTF");
+ throw new IllegalArgumentException("BAD");
}
}
diff --git a/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java b/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java
index 4dd7b70..a400650 100644
--- a/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java
+++ b/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java
@@ -76,7 +76,7 @@
}
}
catch (FileNotFoundException e) {
- log.wtf(e, "This can only happen if the .exists() call lied.");
+ log.error(e, "This can only happen if the .exists() call lied.");
}
finally {
CloseQuietly.close(stream);
diff --git a/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java b/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java
index f400bac..e30e9d1 100644
--- a/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java
+++ b/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java
@@ -124,7 +124,7 @@
List<T> retVal = result.toList();
if (retVal == null) {
- throw new ISE("Got a null list of results! WTF?!");
+ throw new ISE("Got a null list of results");
}
return retVal;
diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java
index b9d0c7c..62c65f7 100644
--- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java
+++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java
@@ -169,7 +169,7 @@
}
if (newBuckets < maxBuckets) {
- throw new ISE("WTF?! newBuckets[%,d] < maxBuckets[%,d]", newBuckets, maxBuckets);
+ throw new ISE("newBuckets[%,d] < maxBuckets[%,d]", newBuckets, maxBuckets);
}
ByteBuffer newTableBuffer = buffer.duplicate();
@@ -206,7 +206,7 @@
final int newBucket = findBucket(true, newBuckets, newTableBuffer, keyBuffer, keyHash);
if (newBucket < 0) {
- throw new ISE("WTF?! Couldn't find a bucket while resizing?!");
+ throw new ISE("Couldn't find a bucket while resizing");
}
final int newBucketOffset = newBucket * bucketSizeWithHash;
@@ -230,7 +230,7 @@
growthCount++;
if (size != newSize) {
- throw new ISE("WTF?! size[%,d] != newSize[%,d] after resizing?!", size, newSize);
+ throw new ISE("size[%,d] != newSize[%,d] after resizing", size, newSize);
}
}
diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java
index b15845b..2c7c320 100644
--- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java
+++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java
@@ -98,7 +98,7 @@
// We check this already in SpillingGrouper to ensure that LimitedBufferHashGrouper is only used when there is
// sufficient buffer capacity. If this error occurs, something went very wrong.
if (!validateBufferCapacity(totalBuffer.capacity())) {
- throw new IAE("WTF? Using LimitedBufferHashGrouper with insufficient buffer capacity.");
+ throw new IAE("LimitedBufferHashGrouper initialized with insufficient buffer capacity");
}
//only store offsets up to `limit` + 1 instead of up to # of buckets, we only keep the top results
@@ -485,7 +485,7 @@
final int newBucket = findBucket(true, maxBuckets, newTableBuffer, keyBuffer, keyHash);
if (newBucket < 0) {
- throw new ISE("WTF?! Couldn't find a bucket while resizing?!");
+ throw new ISE("Couldn't find a bucket while resizing");
}
final int newBucketOffset = newBucket * bucketSizeWithHash;
diff --git a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java
index 9c72b30..b09f731 100644
--- a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java
+++ b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java
@@ -65,7 +65,7 @@
)
{
// "legacy" should be non-null due to toolChest.mergeResults
- final boolean legacy = Preconditions.checkNotNull(query.isLegacy(), "WTF?! Expected non-null legacy");
+ final boolean legacy = Preconditions.checkNotNull(query.isLegacy(), "Expected non-null 'legacy' parameter");
final Object numScannedRows = responseContext.get(ResponseContext.Key.NUM_SCANNED_ROWS);
if (numScannedRows != null) {
diff --git a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java
index be164dd..2efe5e4 100644
--- a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java
+++ b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java
@@ -253,7 +253,7 @@
}
}
if (finalInterval == null) {
- throw new ISE("Row unexpectedly came from an unscanned interval");
+ throw new ISE("Row came from an unscanned interval");
}
}
}
diff --git a/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java b/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java
index ab5eef2..2180b73 100644
--- a/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java
+++ b/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java
@@ -80,7 +80,7 @@
AggregatorUtil.condensedAggregators(query.getAggregatorSpecs(), query.getPostAggregatorSpecs(), metric);
if (condensedAggPostAggPair.lhs.isEmpty() && condensedAggPostAggPair.rhs.isEmpty()) {
- throw new ISE("WTF! Can't find the metric to do topN over?");
+ throw new ISE("Can't find the topN metric");
}
// Run topN for only a single metric
TopNQuery singleMetricQuery = new TopNQueryBuilder(query)
diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java b/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java
index ac48901..490f625 100644
--- a/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java
+++ b/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java
@@ -217,7 +217,7 @@
if (IncrementalIndexRow.EMPTY_ROW_INDEX == prev) {
getNumEntries().incrementAndGet();
} else {
- throw new ISE("WTF! we are in sychronized block.");
+ throw new ISE("Unexpected state: Concurrent fact addition.");
}
}
}
diff --git a/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java b/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java
index df9241b..76bbb62 100644
--- a/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java
+++ b/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java
@@ -60,7 +60,7 @@
{
// Verify expression has just one binding.
if (expression.analyzeInputs().getRequiredBindings().size() != 1) {
- throw new ISE("WTF?! Expected expression with just one binding");
+ throw new ISE("Expected expression with just one binding");
}
this.selector = Preconditions.checkNotNull(selector, "selector");
diff --git a/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java b/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java
index a3bf08c..cbd98cf 100644
--- a/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java
+++ b/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java
@@ -56,7 +56,7 @@
{
// Verify expression has just one binding.
if (expression.analyzeInputs().getRequiredBindings().size() != 1) {
- throw new ISE("WTF?! Expected expression with just one binding");
+ throw new ISE("Expected expression with just one binding");
}
this.selector = Preconditions.checkNotNull(selector, "selector");
diff --git a/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java b/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java
index 729923c..35e642f 100644
--- a/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java
+++ b/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java
@@ -210,7 +210,7 @@
if (index4.getAvailableDimensions().size() != 0) {
// Just double-checking that the exclusions are working properly
- throw new ISE("WTF?! Expected no dimensions in index4");
+ throw new ISE("Expected no dimensions in index4");
}
}
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java
index 70e1abc..592baeb 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java
@@ -132,7 +132,7 @@
public void testBufferTooSmall()
{
expectedException.expect(IAE.class);
- expectedException.expectMessage("WTF? Using LimitedBufferHashGrouper with insufficient buffer capacity.");
+ expectedException.expectMessage("LimitedBufferHashGrouper initialized with insufficient buffer capacity");
final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
makeGrouper(columnSelectorFactory, 10, 2, 100);
}
diff --git a/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java b/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java
index d51402d..9d29d80 100644
--- a/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java
+++ b/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java
@@ -60,7 +60,7 @@
{
Preconditions.checkState(
instance.getPort() >= 0 || (instance.getSslPort() != null && instance.getSslPort() >= 0),
- "WTH?! Both port and sslPort not set"
+ "Both port and sslPort not set"
);
final int port;
final String scheme;
diff --git a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
index cc4d033..bf2bcb4 100644
--- a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
+++ b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
@@ -322,7 +322,7 @@
// Metadata transaction cannot fail because we are not trying to do one.
if (!result.isSuccess()) {
- throw new ISE("WTF?! announceHistoricalSegments failed with null metadata, should not happen.");
+ throw new ISE("announceHistoricalSegments failed with null metadata, should not happen.");
}
return result.getSegments();
diff --git a/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java b/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java
index b2ac7e8..80e8dc6 100644
--- a/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java
+++ b/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java
@@ -334,9 +334,9 @@
dataSegment,
(segment, existingLock) -> {
if (existingLock == null) {
- throw new ISE("WTH? the given lock has already been removed");
+ throw new ISE("Lock has already been removed");
} else if (existingLock != lock) {
- throw new ISE("WTH? Different lock instance");
+ throw new ISE("Different lock instance");
} else {
if (existingLock.numReferences == 1) {
return null;
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java b/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java
index bf5ba26..137280e 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java
@@ -96,7 +96,7 @@
!newSegment.getId().equals(currentSegment.getId())) {
// Sanity check: identifier should not change
throw new ISE(
- "WTF?! Cannot swap identifier[%s] -> [%s]!",
+ "Cannot swap identifier[%s] -> [%s]",
currentSegment.getId(),
newSegment.getId()
);
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java
index d5a190a..62a5ba9 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java
@@ -706,12 +706,12 @@
// Sanity checks
for (FireHydrant hydrant : sink) {
if (sink.isWritable()) {
- throw new ISE("WTF?! Expected sink to be no longer writable before mergeAndPush. Segment[%s].", identifier);
+ throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
}
synchronized (hydrant) {
if (!hydrant.hasSwapped()) {
- throw new ISE("WTF?! Expected sink to be fully persisted before mergeAndPush. Segment[%s].", identifier);
+ throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
}
}
}
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java
index 8e5c3fe..5ecefeb 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java
@@ -131,7 +131,7 @@
// There should be only one appending segment at any time
Preconditions.checkState(
this.appendingSegment == null,
- "WTF?! Current appendingSegment[%s] is not null. "
+ "Current appendingSegment[%s] is not null. "
+ "Its state must be changed before setting a new appendingSegment[%s]",
this.appendingSegment,
appendingSegment
@@ -345,7 +345,7 @@
for (SegmentIdWithShardSpec identifier : appenderator.getSegments()) {
if (identifier.equals(newSegment)) {
throw new ISE(
- "WTF?! Allocated segment[%s] which conflicts with existing segment[%s].",
+ "Allocated segment[%s] which conflicts with existing segment[%s].",
newSegment,
identifier
);
@@ -418,7 +418,7 @@
);
}
catch (SegmentNotWritableException e) {
- throw new ISE(e, "WTF?! Segment[%s] not writable when it should have been.", identifier);
+ throw new ISE(e, "Segment[%s] not writable when it should have been.", identifier);
}
} else {
return AppenderatorDriverAddResult.fail();
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java
index 5e385fa..ea466f6 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java
@@ -197,7 +197,7 @@
synchronized (segments) {
final SegmentsForSequence activeSegmentsForSequence = segments.get(sequenceName);
if (activeSegmentsForSequence == null) {
- throw new ISE("WTF?! Asked to remove segments for sequenceName[%s] which doesn't exist...", sequenceName);
+ throw new ISE("Asked to remove segments for sequenceName[%s], which doesn't exist", sequenceName);
}
for (final SegmentIdWithShardSpec identifier : identifiers) {
@@ -207,7 +207,7 @@
if (segmentsOfInterval == null ||
segmentsOfInterval.getAppendingSegment() == null ||
!segmentsOfInterval.getAppendingSegment().getSegmentIdentifier().equals(identifier)) {
- throw new ISE("WTF?! Asked to remove segment[%s] that didn't exist...", identifier);
+ throw new ISE("Asked to remove segment[%s], which doesn't exist", identifier);
}
segmentsOfInterval.finishAppendingToCurrentActiveSegment(SegmentWithState::finishAppending);
}
@@ -424,7 +424,7 @@
if (segmentWithState.getState() == SegmentState.APPENDING) {
if (pair != null && pair.lhs != null) {
throw new ISE(
- "WTF?! there was already an appendingSegment[%s] before adding an appendingSegment[%s]",
+ "appendingSegment[%s] existed before adding an appendingSegment[%s]",
pair.lhs,
segmentWithState
);
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/plumber/Plumbers.java b/server/src/main/java/org/apache/druid/segment/realtime/plumber/Plumbers.java
index 57a239d..44d611d 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/plumber/Plumbers.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/plumber/Plumbers.java
@@ -76,7 +76,7 @@
catch (IndexSizeExceededException e) {
// Shouldn't happen if this is only being called by a single thread.
// plumber.add should be swapping out indexes before they fill up.
- throw new ISE(e, "WTF?! Index size exceeded, this shouldn't happen. Bad Plumber!");
+ throw new ISE(e, "Index size exceeded");
}
if (addResult.getRowCount() == -1) {
diff --git a/server/src/main/java/org/apache/druid/segment/realtime/plumber/RealtimePlumber.java b/server/src/main/java/org/apache/druid/segment/realtime/plumber/RealtimePlumber.java
index 86dd7a1..6210754 100644
--- a/server/src/main/java/org/apache/druid/segment/realtime/plumber/RealtimePlumber.java
+++ b/server/src/main/java/org/apache/druid/segment/realtime/plumber/RealtimePlumber.java
@@ -95,6 +95,7 @@
import java.util.concurrent.TimeUnit;
/**
+ *
*/
public class RealtimePlumber implements Plumber
{
@@ -213,7 +214,8 @@
}
@Override
- public IncrementalIndexAddResult add(InputRow row, Supplier<Committer> committerSupplier) throws IndexSizeExceededException
+ public IncrementalIndexAddResult add(InputRow row, Supplier<Committer> committerSupplier)
+ throws IndexSizeExceededException
{
long messageTimestamp = row.getTimestampFromEpoch();
final Sink sink = getSink(messageTimestamp);
@@ -394,7 +396,7 @@
if (!isPushedMarker.exists()) {
removeSegment(sink, mergedTarget);
if (mergedTarget.exists()) {
- log.wtf("Merged target[%s] exists?!", mergedTarget);
+ log.warn("Merged target[%s] still exists after attempt to delete it; skipping push.", mergedTarget);
return;
}
} else {
diff --git a/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java b/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java
index fc466d1..7f33d1e 100644
--- a/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java
+++ b/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java
@@ -391,7 +391,7 @@
} else {
log.makeAlert(
th,
- "WTF! Couldn't schedule next sync. [%s] is not being synced any more, restarting Druid process on that "
+ "Couldn't schedule next sync. [%s] is not being synced any more, restarting Druid process on that "
+ "server might fix the issue.",
logIdentity
).emit();
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java b/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java
index 0d29769..a05c5aa 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java
@@ -227,7 +227,7 @@
break;
default:
scheduleNextRunImmediately = false;
- log.error("WTF! Server[%s] returned unknown state in status[%s].", serverId, e.getStatus());
+ log.error("Server[%s] returned unknown state in status[%s].", serverId, e.getStatus());
}
}
}
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java b/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java
index aa1728e..876f4b6 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java
@@ -98,7 +98,7 @@
for (TaskStatusPlus status : compactionTasks) {
final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
if (response == null) {
- throw new ISE("WTH? got a null paylord from overlord for task[%s]", status.getId());
+ throw new ISE("Got a null payload from overlord for task[%s]", status.getId());
}
if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
@@ -107,7 +107,7 @@
final int numSubTasks = findNumMaxConcurrentSubTasks(compactionTaskQuery.getTuningConfig());
numEstimatedNonCompleteCompactionTasks += numSubTasks + 1; // count the compaction task itself
} else {
- throw new ISE("WTH? task[%s] is not a compactionTask?", status.getId());
+ throw new ISE("task[%s] is not a compactionTask", status.getId());
}
}
diff --git a/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java b/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java
index cd3ac2c..7526ecb 100644
--- a/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java
+++ b/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java
@@ -395,7 +395,7 @@
lookupCoordinatorManagerConfig.getHostTimeout().getMillis() * 10,
TimeUnit.MILLISECONDS
)) {
- throw new ISE("WTF! LookupCoordinatorManager executor from last start() hasn't finished. Failed to Start.");
+ throw new ISE("LookupCoordinatorManager executor from last start() hasn't finished. Failed to Start.");
}
executorService = MoreExecutors.listeningDecorator(
diff --git a/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java b/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java
index 76fe865..607a3c9 100644
--- a/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java
+++ b/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java
@@ -844,7 +844,7 @@
} else if (a instanceof DataSegment && b instanceof DataSegment) {
return ((DataSegment) a).getId().compareTo(((DataSegment) b).getId());
} else {
- throw new IllegalStateException("WTF??");
+ throw new IllegalStateException("BAD");
}
}
);
diff --git a/services/src/main/java/org/apache/druid/cli/DumpSegment.java b/services/src/main/java/org/apache/druid/cli/DumpSegment.java
index 9ba2819..eab178a 100644
--- a/services/src/main/java/org/apache/druid/cli/DumpSegment.java
+++ b/services/src/main/java/org/apache/druid/cli/DumpSegment.java
@@ -189,7 +189,7 @@
runBitmaps(injector, index);
break;
default:
- throw new ISE("WTF?! dumpType[%s] has no handler?", dumpType);
+ throw new ISE("dumpType[%s] has no handler", dumpType);
}
}
catch (Exception e) {
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java
index 8199e3b..9960a2c 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java
@@ -60,7 +60,7 @@
rexNode,
operands -> {
if (operands.size() < 2) {
- throw new ISE("WTF?! Got binary operator[%s] with %s args?", operator.getName(), operands.size());
+ throw new ISE("Got binary operator[%s] with %s args", operator.getName(), operands.size());
}
return DruidExpression.fromExpression(
@@ -92,7 +92,7 @@
rexNode,
operands -> {
if (operands.size() < 2) {
- throw new ISE("WTF?! Got binary operator[%s] with %s args?", operator.getName(), operands.size());
+ throw new ISE("Got binary operator[%s] with %s args", operator.getName(), operands.size());
}
return DruidExpression.fromExpression(
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java
index dc657f9..e456474 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java
@@ -221,7 +221,7 @@
final RexInputRef ref = (RexInputRef) rexNode;
final String columnName = rowSignature.getColumnName(ref.getIndex());
if (columnName == null) {
- throw new ISE("WTF?! Expression referred to nonexistent index[%d]", ref.getIndex());
+ throw new ISE("Expression referred to nonexistent index[%d]", ref.getIndex());
}
return DruidExpression.fromColumn(columnName);
@@ -490,7 +490,7 @@
|| kind == SqlKind.LESS_THAN
|| kind == SqlKind.LESS_THAN_OR_EQUAL) {
final List<RexNode> operands = ((RexCall) rexNode).getOperands();
- Preconditions.checkState(operands.size() == 2, "WTF?! Expected 2 operands, got[%,d]", operands.size());
+ Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%,d]", operands.size());
boolean flip = false;
RexNode lhs = operands.get(0);
RexNode rhs = operands.get(1);
@@ -525,7 +525,7 @@
flippedKind = SqlKind.GREATER_THAN_OR_EQUAL;
break;
default:
- throw new ISE("WTF?! Kind[%s] not expected here", kind);
+ throw new ISE("Kind[%s] not expected here", kind);
}
} else {
flippedKind = kind;
@@ -632,7 +632,7 @@
filter = Bounds.lessThanOrEqualTo(boundRefKey, val);
break;
default:
- throw new IllegalStateException("WTF?! Shouldn't have got here...");
+ throw new IllegalStateException("Shouldn't have got here");
}
return filter;
@@ -770,7 +770,7 @@
case LESS_THAN_OR_EQUAL:
return Bounds.lessThan(boundRefKey, String.valueOf(interval.getEndMillis()));
default:
- throw new IllegalStateException("WTF?! Shouldn't have got here...");
+ throw new IllegalStateException("Shouldn't have got here");
}
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java
index 23c48df..1f716e7 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java
@@ -195,7 +195,7 @@
final RexInputRef ref = (RexInputRef) rexNode;
final String columnName = rowSignature.getColumnName(ref.getIndex());
if (columnName == null) {
- throw new ISE("WTF?! PostAgg referred to nonexistent index[%d]", ref.getIndex());
+ throw new ISE("PostAggregator referred to nonexistent index[%d]", ref.getIndex());
}
return new FieldAccessPostAggregator(
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java
index 77baafb..588a2fe 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java
@@ -23,6 +23,7 @@
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
@@ -59,8 +60,7 @@
TimeFloorOperatorConversion.toTimestampFloorOrCeilArgs(plannerContext, rowSignature, call.getOperands())
);
} else {
- // WTF? CEIL with the wrong number of arguments?
- return null;
+ throw new ISE("Unexpected number of arguments");
}
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java
index d27c1e7..be8d891 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java
@@ -23,6 +23,7 @@
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.OperatorConversions;
@@ -59,8 +60,7 @@
TimeFloorOperatorConversion.toTimestampFloorOrCeilArgs(plannerContext, rowSignature, call.getOperands())
);
} else {
- // WTF? FLOOR with the wrong number of arguments?
- return null;
+ throw new ISE("Unexpected number of arguments");
}
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java
index 7603451..e638ff4 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java
@@ -76,7 +76,7 @@
public int compareTo(BoundValue o)
{
if (!comparator.equals(o.comparator)) {
- throw new ISE("WTF?! Comparator mismatch?!");
+ throw new ISE("Comparator mismatch");
}
return comparator.compare(value, o.value);
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java
index ce29494..971ba16 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java
@@ -153,7 +153,8 @@
// We found a simplification. Remove the old filters and add new ones.
for (final BoundDimFilter bound : filterList) {
if (!newChildren.remove(bound)) {
- throw new ISE("WTF?! Tried to remove bound but couldn't?");
+ // Don't expect this to happen, but include it as a sanity check.
+ throw new ISE("Tried to remove bound, but couldn't");
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java
index 54b2625..631fd93 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java
@@ -85,7 +85,8 @@
for (final SelectorDimFilter selector : filterList) {
values.add(selector.getValue());
if (!children.remove(selector)) {
- throw new ISE("WTF?! Tried to remove selector but couldn't?");
+ // Don't expect this to happen, but include it as a sanity check.
+ throw new ISE("Tried to remove selector but couldn't");
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java
index 8f2f760..df03ff96 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java
@@ -141,7 +141,8 @@
);
if (!transformed.getIntervals().equals(ImmutableList.of(eternity()))) {
- throw new ISE("WTF?! optimizeFilterOnly was about to return filtration with intervals?!");
+ // Should not happen, but include as a sanity check to be sure.
+ throw new ISE("optimizeFilterOnly was about to return filtration with intervals");
}
return transformed;
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java
index c8cfb04..4533c95 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java
@@ -149,7 +149,7 @@
)
);
} else {
- throw new ISE("WTF?! Should not have got here, operator was: %s", operator);
+ throw new ISE("Should not have got here, operator was: %s", operator);
}
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java
index 73abd69..7a721a1 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java
@@ -393,7 +393,7 @@
case SCAN:
return scan;
default:
- throw new ISE("WTF?! Unknown stage: %s", currentStage);
+ throw new ISE("Unknown stage: %s", currentStage);
}
}
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java
index 4c79e80..7b79d8c 100644
--- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java
@@ -61,7 +61,7 @@
final int fieldCount = aggregate.getGroupCount() + aggregate.getAggCallList().size();
if (fieldCount != aggregate.getRowType().getFieldCount()) {
throw new ISE(
- "WTF, expected[%s] to have[%s] fields but it had[%s]",
+ "Expected[%s] to have[%s] fields but it had[%s]",
aggregate,
fieldCount,
aggregate.getRowType().getFieldCount()