)]}'
{
  "log": [
    {
      "commit": "be6b3712fa96e8c07452ae76f0358e38998a93f5",
      "tree": "81e2178e82a0e9fbce9bb97408599c7a62d60c27",
      "parents": [
        "c2d5d74e338b15c8bb3f7e346dd8ac1dd4599bfc"
      ],
      "author": {
        "name": "Cece Mei",
        "email": "yingqian.mei@gmail.com",
        "time": "Fri Apr 10 14:13:53 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Apr 10 14:13:53 2026 -0700"
      },
      "message": "prep for druid 38 (#19274)"
    },
    {
      "commit": "c2d5d74e338b15c8bb3f7e346dd8ac1dd4599bfc",
      "tree": "827f15da36c016e14d34ab84b63c922f21e2b5b9",
      "parents": [
        "bfb46e8d695addc45eabfcfb96f37bc08e580fd6"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Fri Apr 10 13:26:47 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Apr 10 13:26:47 2026 -0700"
      },
      "message": "test: convert tests in druid-processing to junit5 pt. 1 (#19289)"
    },
    {
      "commit": "bfb46e8d695addc45eabfcfb96f37bc08e580fd6",
      "tree": "7d480ff1cc503e5695d3d6e7371e4545af1858b0",
      "parents": [
        "da83c30dd493104a9e255fe1e64d91e201ec9ae6"
      ],
      "author": {
        "name": "Cece Mei",
        "email": "yingqian.mei@gmail.com",
        "time": "Fri Apr 10 11:19:12 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Apr 10 11:19:12 2026 -0700"
      },
      "message": "docs: Recommend Overlord-based auto-compaction and mark useIncrementalCache production ready (#19252)\n\n* auto\n\n* DruidRunStats\n\n* doc\n\n* check\n\n* default\n\n* legacy\n\n* default-native\n\n* format\n\n* Revert \"DruidRunStats\"\n\nThis reverts commit d3751364cce2da5c384400d0e4209797bfc2f09e.\n\n* format\n\n* cascading\n\n* doc\n\n* link\n\n* spell\n\n* test\n\n* style"
    },
    {
      "commit": "da83c30dd493104a9e255fe1e64d91e201ec9ae6",
      "tree": "c4c137c366beffe2c21b2cf9729d6923b5ca470c",
      "parents": [
        "3d87f022b45e584514b1f242ffb1d1cb5874a422"
      ],
      "author": {
        "name": "Cece Mei",
        "email": "yingqian.mei@gmail.com",
        "time": "Fri Apr 10 11:18:58 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Apr 10 11:18:58 2026 -0700"
      },
      "message": "minor: fix several CVE (#19276)\n\n* cve\n\n* compile\n\n* tmp\n\n* license\n\n* cve"
    },
    {
      "commit": "3d87f022b45e584514b1f242ffb1d1cb5874a422",
      "tree": "a8d26659da4f948fe98f2c9f3310b00fe2eee0a4",
      "parents": [
        "a0ec76ec13d54a72dc58213ef38635f784fac203"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Thu Apr 09 02:57:10 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 09 02:57:10 2026 -0700"
      },
      "message": "refactor: switch remaining AggregationTestHelper methods to use java objects instead of json strings (#19275)"
    },
    {
      "commit": "a0ec76ec13d54a72dc58213ef38635f784fac203",
      "tree": "f08dafd54b348951c2a809890b6d5a0ad9daa7af",
      "parents": [
        "9157bdd6f254135c094255db8822c806537b9eb9"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Wed Apr 08 15:41:38 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 08 15:41:38 2026 -0700"
      },
      "message": "fix: clustered by virtual columns that depended on virtual columns now correctly preserve these dependencies (#19262)\n\nchanges:\n* adds `addRequiredVirtualColumns` method to `SegmentGenerationStageSpec` which resolves transitive virtual column dependencies for virtual columns used by clustering, fixing a bug where these dependent virtual columns would be lost in the shard spec and compaction state\n* adds `supportsRequiredRewrite` and `rewriteRequiredColumns` to `VirtualColumn` allowing a virtual column to rewrite its input references to equivalent names\n* adds `Expr.rewriteBindings` to rewrite identifier bindings in an `Expr` tree\n* `VirtualColumns.findEquivalent` is enhanced to transitively resolve dependent virtual columns across naming contexts before checking equivalence, enabling detection that e.g. `lower(\"v1\")` ≡ `lower(\"v0\")` when v0 and v1 are equivalent virtual columns\n* `FilterSegmentPruner` updated to use transitive equivalence when matching shard virtual columns to query virtual columns (with Optional-based caching to correctly handle nulls)\n* `Projections.matchQueryVirtualColumn` updated similarly\n* intern range shardspec dimension strings and virtual columns"
    },
    {
      "commit": "9157bdd6f254135c094255db8822c806537b9eb9",
      "tree": "bc4ba9ffe9bd10715f8f9004ce331c7490677e2f",
      "parents": [
        "8a8160157172e14a3052597e2cff4950d8ede245"
      ],
      "author": {
        "name": "mshahid6",
        "email": "maryam.shahid1299@gmail.com",
        "time": "Wed Apr 08 12:30:37 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 08 12:30:37 2026 -0700"
      },
      "message": "feat: Segment Timout Per Datasource (#19221)\n\nAdds the ability to configure perSegmentTimeout per datasource via BrokerDynamicConfig, so different per-segment timeout thresholds for can be set for different datasources e.g. larger datasources may need different thresholds than small ones."
    },
    {
      "commit": "8a8160157172e14a3052597e2cff4950d8ede245",
      "tree": "71f60623b41fe49c66d050fab1fa0a8fe8ee0441",
      "parents": [
        "0a4b7722ab83070942319d1ab6c523747bd89da7"
      ],
      "author": {
        "name": "Virushade",
        "email": "phuaguanwei99@gmail.com",
        "time": "Wed Apr 08 14:36:42 2026 +0800"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Apr 07 23:36:42 2026 -0700"
      },
      "message": "Prevent Coordinator startup failure (#19265)"
    },
    {
      "commit": "0a4b7722ab83070942319d1ab6c523747bd89da7",
      "tree": "4d4b7a82b87b003c8813ec6d1489c8c6d33aa795",
      "parents": [
        "04137070df7b2a157ff7e49abff7a4c4aa3c58ea"
      ],
      "author": {
        "name": "Zoltan Haindrich",
        "email": "kirk@rxd.hu",
        "time": "Fri Apr 03 07:06:33 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Apr 03 07:06:33 2026 +0200"
      },
      "message": "minor: update some deps and reduce thread pressure during test runs (#19248)"
    },
    {
      "commit": "04137070df7b2a157ff7e49abff7a4c4aa3c58ea",
      "tree": "1b49d5bf90a59c29ee5c6792082a6c6403339c00",
      "parents": [
        "8ed78f4718607f7a0eb2090bfea97f7c99c9b86a"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Thu Apr 02 21:42:59 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 21:42:59 2026 -0700"
      },
      "message": "perf: Interrupt controllers when canceling them. (#19233)\n\nIn PRs #18095 and #18931, worker cancellation was switched to use\ninterrupts with a lightweight non-interrupt-based failsafe. This patch\nimplements a similar idea for controllers, to aid in more prompt\ncancellation in cases where the controller is blocking on something.\n\nThe main change is to track the controller thread in ControllerHolder\nand interrupt it on cancel(), in addition to calling controller.stop().\nControllerHolder is also moved from dart.controller to msq.exec, since\nit is now a shared class, no longer Dart-specific. In addition, the\n\"workerOffline\" logic is moved to Dart\u0027s ControllerMessageListener,\nsince that really is Dart-specific."
    },
    {
      "commit": "8ed78f4718607f7a0eb2090bfea97f7c99c9b86a",
      "tree": "aa74cb34809a3f0b4556528c26cf2ba777e59b12",
      "parents": [
        "c8a1267d5aa5553ecec10a1ce6d46dc078541f63"
      ],
      "author": {
        "name": "Abhishek Radhakrishnan",
        "email": "abhishek.rb19@gmail.com",
        "time": "Thu Apr 02 16:27:48 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 18:27:48 2026 -0500"
      },
      "message": "Fixup doc (#19260)"
    },
    {
      "commit": "c8a1267d5aa5553ecec10a1ce6d46dc078541f63",
      "tree": "21f53b19c4b80d039750ca3e8660f9ba928d231b",
      "parents": [
        "0a936211fb5236ba7e37831b180cd9587bd504eb"
      ],
      "author": {
        "name": "aho135",
        "email": "andrewho135@gmail.com",
        "time": "Thu Apr 02 15:39:36 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 15:39:36 2026 -0700"
      },
      "message": "feat: Add an authorizer for read only operations (#19243)\n\n* Add an authorizer for read only operations\n\n* Checkstyle fixes\n\n* Remove use of deprecated methods\n\n* Register ReadOnlyAuthorizer\n\n* Migrate ReadOnlyAuthorizer from druid-basic-security to druid-server\n\n* Document ReadOnly authorizer\n\n* Spell check\n\n* Hook up ReadOnlyAuthorizer with Policy\n\n* Move ReadOnlyAuthorizer back to druid-basic-security\n\n* Example configuration for basic security with readonly\n\n* Change to readOnly for consistency\n\n* Fix configuration example\n\n* Update druid-basic-security.md"
    },
    {
      "commit": "0a936211fb5236ba7e37831b180cd9587bd504eb",
      "tree": "a8c6cb5835e97b953729ed8229b554368e46e4c5",
      "parents": [
        "e499a52e51e19c49397a137f85aa68dcef4c6662"
      ],
      "author": {
        "name": "Zoltan Haindrich",
        "email": "kirk@rxd.hu",
        "time": "Thu Apr 02 16:43:36 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 16:43:36 2026 +0200"
      },
      "message": "feat: use aws-crt by default for async s3 operations (#19249)"
    },
    {
      "commit": "e499a52e51e19c49397a137f85aa68dcef4c6662",
      "tree": "e3524fba2d55caab9e7855422b9d71b0d824b145",
      "parents": [
        "4a49c73e84e1a17a1e7a1a10d5172a24fc309d53"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Thu Apr 02 06:54:59 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 08:54:59 2026 -0500"
      },
      "message": "minor: Show running Dart queries first. (#19237)\n\nThis patch adjusts the ordering of current-dart-panel in the web console\nto match the current tasks panel: RUNNING is first, ACCEPTED is next,\nthen all the completed states follow. Within each category, queries\nare sorted by timestamp, newest first: RUNNING and ACCEPTED by start\ntime, and completed queries by finish time.\n\nThis patch also renames the interface method \"getRunningQueries\" to\n\"getQueries\", because it can return completed queries too."
    },
    {
      "commit": "4a49c73e84e1a17a1e7a1a10d5172a24fc309d53",
      "tree": "ed3e735918a53e4ed96ee49e49099d77d4b253de",
      "parents": [
        "434163f341af31ba188ac813f81f398cac8f6b6c"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Thu Apr 02 06:54:42 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Apr 02 08:54:42 2026 -0500"
      },
      "message": "dev: Add typecheck to npm run test-unit. (#19251)\n\nThis ensures that TypeScript typechecking happens on calls to test-unit."
    },
    {
      "commit": "434163f341af31ba188ac813f81f398cac8f6b6c",
      "tree": "dcc9ddd315ac1dc7ac62bc44896a01bfa292c962",
      "parents": [
        "e6b0ce9a4c579a63f258396c18acf801fd5dee73"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Wed Apr 01 19:19:22 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 19:19:22 2026 -0500"
      },
      "message": "docs: cascading reindexing docs (#19213)\n\n* cascading reindexing docs\n\n* Apply suggestion from @capistrant\n\n* Apply suggestions from code review\n\nCo-authored-by: 317brian \u003c53799971+317brian@users.noreply.github.com\u003e\n\n---------\n\nCo-authored-by: 317brian \u003c53799971+317brian@users.noreply.github.com\u003e"
    },
    {
      "commit": "e6b0ce9a4c579a63f258396c18acf801fd5dee73",
      "tree": "387369086032f22631ea09c33d00ac60678c8d81",
      "parents": [
        "c3262756f1d5eb8fd992e93b54a27c3181ed9bfa"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Wed Apr 01 15:05:35 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 15:05:35 2026 -0500"
      },
      "message": "enhance the cluster compaction config view to allow configuring mostFragmentedFirst more easily for operators (#19242)"
    },
    {
      "commit": "c3262756f1d5eb8fd992e93b54a27c3181ed9bfa",
      "tree": "2c46e667697c2552e1dd6496998d94b46fb9c50a",
      "parents": [
        "c200303022da0d3bf16422447d2bfc429a2a75a2"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Wed Apr 01 10:37:55 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 10:37:55 2026 -0700"
      },
      "message": "feat: MSQ exception handling improvements. (#19234)\n\nThis patch makes various changes to MSQ exception handling, with the\ngoal of better compatibility with DruidExceptions:\n\n1) Add DruidExceptionFault, which allows including DruidExceptions in\n   MSQErrorReports without losing any information.\n\n2) Make MSQFault#toDruidException abstract so all fault implementations\n   must reference a specific category and persona. Changed the category\n   and persona of various faults, in situations where the inherited\n   default was not ideal.\n\n3) QueryRuntimeFault no longer generated, instead we\u0027re relying fully\n   on DruidException for exceptions that originate in the query runtime.\n\n4) Retain original DruidException, rather than wrapping and re-throwing,\n   in ControllerImpl, SegmentGeneratorFrameProcessor, and\n   RowBasedFrameWriter.\n\n5) Update Either to throw non-DEVELOPER DruidExceptions as-is, rather\n   than wrapping them."
    },
    {
      "commit": "c200303022da0d3bf16422447d2bfc429a2a75a2",
      "tree": "1605330ef4a4f66a930cdaa9d92a0149cad99e46",
      "parents": [
        "fc63999a6af60d676d0f6bc5e0847659e9c0da7f"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Wed Apr 01 08:58:45 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 08:58:45 2026 -0700"
      },
      "message": "fix: modify ExpressionFilter.canVectorizeMatcher and AggregatorUtil.canVectorize to use ExpressionPlanner.plan (#19245)\n\nchanges:\n* `ExpressionFilter.canVectorizeMatcher` and `AggregatorUtil.canVectorize` now use `ExpressionPlanner.plan` to check for `Trait.VECTORIZABLE` instead of calling `Expr.canVectorize` directly\n* updated javadoc for `Expr.canVectorize` to indicate that it alone isn\u0027t sufficient to decide if vector processing should be used\n* update `BaseFilterTest.assertFilterMatchesSkipVectorize` to now assert that the cursor factory is not vectorizable (or not a `ColumnarFrameCursorFactory`) so that we do not accidentally skip vectorization for things that are vectorizable, which would have caught the bug for `ExpressionFilter`, fixed up tests that were incorrectly skipping vector coverage\n* update `AggregatorUtilTest` to cover `canVectorize`"
    },
    {
      "commit": "fc63999a6af60d676d0f6bc5e0847659e9c0da7f",
      "tree": "96de855ee90018437860140d20badf2acd5fc5b3",
      "parents": [
        "884955eaf912404527fa903562c59111e159f073"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Wed Apr 01 03:46:47 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 16:16:47 2026 +0530"
      },
      "message": "fix: Handle poll idle ratio not being available. (#19246)\n\nThe change to RecordSupplier allows Kinesis task live reports to work\nagain. They had been throwing UnsupportedOperationException.\n\nThe change to CostBasedAutoScaler differentiates poll idle ratio of -1\n(no data) from 0 (never idle). There was already some logic in the class\nfor dealing with the negative case: it would treat it like 0.5. But this\nlogic had not been reachable due to extractPollIdleRatio returning 0\nrather than -1 for the no-data case."
    },
    {
      "commit": "884955eaf912404527fa903562c59111e159f073",
      "tree": "a311a1b1b413fc6288e63da92d9bef5468593f58",
      "parents": [
        "0ac00fa273d7864b3f9b7cf943e199a6430c2abc"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 31 23:46:37 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 31 23:46:37 2026 -0700"
      },
      "message": "dev: Add 14 day dependabot cooldown. (#19241)\n\nThis protects against not-yet-discovered regressions or security\nissues in new releases of dependencies."
    },
    {
      "commit": "0ac00fa273d7864b3f9b7cf943e199a6430c2abc",
      "tree": "4eca15c2d67e4c7347551dafc8e4e0e83b5e95af",
      "parents": [
        "e358a2686724efd452b2102a99f87adb359c41b9"
      ],
      "author": {
        "name": "Zoltan Haindrich",
        "email": "kirk@rxd.hu",
        "time": "Wed Apr 01 07:27:48 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Apr 01 07:27:48 2026 +0200"
      },
      "message": "fix: deadlock in HeapMemorySegmentMetadataCache (#19240)"
    },
    {
      "commit": "e358a2686724efd452b2102a99f87adb359c41b9",
      "tree": "82e9e6dc2130ace7f87b6492a4a7c657d21fd516",
      "parents": [
        "f4f9c0636ac972022077a6f321071535da409327"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 31 16:58:31 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 31 16:58:31 2026 -0700"
      },
      "message": "minor: FrameCombiner related cleanups. (#19244)\n\nFollow-ups to #19238:\n\n1) Remove a stale comment in GroupByFrameCombinerTest.\n\n2) Make TrackingColumnValueSelector.classOfObject() call its delegate,\n   just like TrackingDimensionSelector does. This likely doesn\u0027t matter\n   with how the class is actually used, but it\u0027s nice to be consistent."
    },
    {
      "commit": "f4f9c0636ac972022077a6f321071535da409327",
      "tree": "dd9f2c1e84946a7a32677596d2efa5bda82715fe",
      "parents": [
        "67de7d1889c7e0511d5d4547df70dd960a571d77"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Tue Mar 31 16:46:30 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 31 16:46:30 2026 -0700"
      },
      "message": "refactor: remove ParseSpec (#19239)\n\nchanges:\n* `ParseSpec` and all of its implementations have been removed, including `JavascriptParseSpec` and `JSONLowercaseParseSpec`, which have no `InputFormat` equivalents\n* migrated all tests which were still using `ParseSpec` to convert into a normal schema + input format to just use `InputFormat` directly instead\n* `FlattenJSONBenchmark` now measures the `JsonInputFormat` instead of using a json parse spec/parser"
    },
    {
      "commit": "67de7d1889c7e0511d5d4547df70dd960a571d77",
      "tree": "6a13dd85e27f3caf39070e54d4ff26395efcc3d3",
      "parents": [
        "0bbb8bb9e814bc368cba21c81987223a42d4debe"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 31 13:05:48 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 31 13:05:48 2026 -0700"
      },
      "message": "fix: GroupByFrameCombiner selectors must track cursor changes. (#19238)\n\nThe groupBy combiner introduced in #19193 created selectors incorrectly:\nthey tracked a single underlying cursor rather than following changes\nin the cursor. This patch addresses it by adding indirection through\nTrackingDimensionSelector and TrackingColumnValueSelector."
    },
    {
      "commit": "0bbb8bb9e814bc368cba21c81987223a42d4debe",
      "tree": "29cc5940a770b297420e6b234c475f34921d2173",
      "parents": [
        "5e438c03bacf00216faa12f7548c2e6923f704f6"
      ],
      "author": {
        "name": "Zoltan Haindrich",
        "email": "kirk@rxd.hu",
        "time": "Tue Mar 31 08:21:23 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 31 08:21:23 2026 +0200"
      },
      "message": "minor: update some dependencies (#19208)"
    },
    {
      "commit": "5e438c03bacf00216faa12f7548c2e6923f704f6",
      "tree": "e790774b161ed70a21e803877da97a58e46dcf92",
      "parents": [
        "a1615105d7ad7e14b8f1e445707eb271df87a901"
      ],
      "author": {
        "name": "jtuglu1",
        "email": "jtuglu@netflix.com",
        "time": "Mon Mar 30 20:20:56 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 20:20:56 2026 -0700"
      },
      "message": "Bump Parquet, Avro, and Iceberg versions (#19232)"
    },
    {
      "commit": "a1615105d7ad7e14b8f1e445707eb271df87a901",
      "tree": "a7a773b74391c599d62903bfff7df1a596376f20",
      "parents": [
        "3d8b81ceba96ea4f34bb205ef499ef5b5d60152a"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 30 19:49:25 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 19:49:25 2026 -0700"
      },
      "message": "refactor: migrate influx-extensions to InputFormat, remove InfluxParser/InfluxParseSpec (#19229)"
    },
    {
      "commit": "3d8b81ceba96ea4f34bb205ef499ef5b5d60152a",
      "tree": "3e363857ca4584d9f6353c708875091574f59500",
      "parents": [
        "219ea7a50cd393da3bbc36f902c3213fe419539a"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 30 14:21:25 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 14:21:25 2026 -0700"
      },
      "message": "feat: add SegmentPruner support for datasources/policies (#19228)\n\n* Add SegmentPruner support for RestrictedDataSource policy filters\n\nchanges:\n* adds new `include` method to `SegmentPruner` for checking individual segments for whether or not to prune\n* adds default implementation of `prune` method which calls `include`\n* adds new `combine` method to `SegmentPruner` for merging pruners\n* adds new `CompositeSegmentPruner` for cases where pruners cannot be naturally combined\n* adds new `createSegmentPruner` method to `DataSource` and `Policy` so that they can participate in pruning\n* updates `ExecutionVertex` to combine the new datasource pruner with the pruner of the filter"
    },
    {
      "commit": "219ea7a50cd393da3bbc36f902c3213fe419539a",
      "tree": "15ccba74b697396414a7edefba1c352e14af2136",
      "parents": [
        "9a7079732e3f39f8f1cf170923ecf39dcfb08b64"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 30 12:42:18 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 12:42:18 2026 -0700"
      },
      "message": "fix: Dart missing default timeout in prePlanned mode. (#19222)\n\nPreviously, the pre-planned mode for Dart was missing the logic to\nadd the server default timeout, meaning those queries would run with\nno timeout at all unless the user specified one. This patch fixes it\nby moving the timeout setting to initQueryContext, which runs in any\nprePlanned mode.\n\nThis patch also reworks common context handling by separating truly\ncommon context from task-specific context. The truly common context\nnow lives in MultiStageQueryContext#withCommonContext. One of the\nthings this function does is set the queryDeadline key, to represent\nthe time that the query should time out."
    },
    {
      "commit": "9a7079732e3f39f8f1cf170923ecf39dcfb08b64",
      "tree": "01fcdb89c9fc7efc8eee5bb78cc3484b1da55f51",
      "parents": [
        "0e7091485b47c6dfb6837e27e1abef5bb653467f"
      ],
      "author": {
        "name": "Razin Bouzar",
        "email": "razinbouzar@gmail.com",
        "time": "Mon Mar 30 11:08:07 2026 -0400"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 08:08:07 2026 -0700"
      },
      "message": "feat: Emit build revision dimension and add it to to sys.servers table (#19123)\n\nThis patch exposes the build revision (git commit SHA) of the JAR running on each node:\n\n- buildRevision metric dimension — emitted on every metric so you can confirm all nodes in a cluster are running the intended revision during rolling deployments. Empty string when\nrunning outside a packaged JAR (e.g., during mvn test).\n- build_revision column in sys.servers — query it directly with SELECT server, version, build_revision FROM sys.servers. For more information, see SERVERS table."
    },
    {
      "commit": "0e7091485b47c6dfb6837e27e1abef5bb653467f",
      "tree": "ea931c860a4599bc517f1d2e2b1822f6eeb39f7d",
      "parents": [
        "01467e163ce1ccdbe8ece45bbf864af97e6a817a"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 30 08:00:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 10:00:11 2026 -0500"
      },
      "message": "fix: Stuck queries due to skipped \"DoneReadingInput\". (#19219)\n\nIn MEMORY output mode when workers sort locally, the controller knows\nto start the next stage when all workers have finished reading their\ninput.\n\nTypically the controller learns this because workers send the\nDoneReadingInput message when they transition from READING_INPUT to\nPRESHUFFLE_WRITING_OUTPUT. However, when a worker finishes reading\ninput very quickly and is also able to fully buffer its output, it\ntransitions directly from READING_INPUT to RESULTS_COMPLETE. This\ncauses the query to become stuck.\n\nThis patch fixes it at the controller, by additionally checking if\nall workers are done reading input when receiving a ResultsComplete\nmessage."
    },
    {
      "commit": "01467e163ce1ccdbe8ece45bbf864af97e6a817a",
      "tree": "bc65cb85e3ad24e378b11d682cbf12e4b7542ed9",
      "parents": [
        "3d1e0d03de341b41027dfe3d493800274f5e895c"
      ],
      "author": {
        "name": "Santosh Pingale",
        "email": "3813695+santosh-d3vpl3x@users.noreply.github.com",
        "time": "Mon Mar 30 13:16:45 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 19:16:45 2026 +0800"
      },
      "message": "feat: add Consul-based service discovery and leader election contrib extension (#18843)\n\n* feat: add Consul discovery contrib extension, harden Consul discovery and leadership election\n\n* feat: add Consul discovery contrib extension, harden Consul discovery and leadership election\n\n* fix: address code review feedback for Consul discovery extension\n\n- Fix leaderSessionTtl computation: track explicit user setting via flag\n  to correctly recompute TTL from healthCheckInterval when omitted\n- Change leaderMaxErrorRetries default to unlimited (Long.MAX_VALUE) for\n  null/0/negative values since giving up breaks cluster operation\n- Add null session guard in leader election loop with backoff to prevent\n  tight retry loops when session creation fails\n- Validate watchSeconds \u003e\u003d 1 second to prevent non-blocking query loops\n  caused by Duration.getStandardSeconds() truncation\n- Fix metadata size check to use UTF-8 byte length instead of char count\n  for correct Consul 512-byte limit enforcement\n- Add null check for announcedNodes during re-registration to handle\n  concurrent unannouncement during shutdown\n- Update docs to reflect unlimited retry default behavior\n\n* fix: update parent version to 37.0.0-SNAPSHOT\n\n* fix: align bcprov-jdk18on version with bcpkix-jdk18on (1.79 -\u003e 1.81)\n\nbcpkix-jdk18on:1.81 requires bcprov-jdk18on:[1.81,1.82) per its POM.\nThis was missed in #18888 which updated bcpkix but not bcprov,\ncausing license check failures.\n\n* test: restore incremental cache config for consul docker tests\n\n* style: remove trailing whitespace in consul changes"
    },
    {
      "commit": "3d1e0d03de341b41027dfe3d493800274f5e895c",
      "tree": "ed7651540c8b313b87fe215f8f4626168363e962",
      "parents": [
        "aea371bc1b046791301b735f8aeb34d67f04bbba"
      ],
      "author": {
        "name": "Virushade",
        "email": "phuaguanwei99@gmail.com",
        "time": "Mon Mar 30 14:07:33 2026 +0800"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 30 14:07:33 2026 +0800"
      },
      "message": "feat: Clean up deep storage after index parallel merge finishes  (#19187)\n\n* Shuffle solution\n\nAdd delete for datasegments\n\nUnit Tests and Embedded tests\n\nCleanup\n\n* Better task logs when fail killing.\n\n* Docs\n\n* Checkstyle\n\n* Cleanup function\n\n* non-volatile\n\n* Clean up unnecessary code\n\n* Test fixes\n\n* HDFS Segment clean-up\n\n* Finish up deepstore\n\n* Fix HDFS pathing bug\n\n* Fix forbidden apis\n\n* Deep Store PR\n\n* Tests\n\n* Correct Docs\n\n* Change DataSegmentKiller to not be aware of supervisor shuffle\n\n* Test unhappy path\n\n* Exception type\n\n* Checkstyle\n\n* Extra space in md file\n\n* Forbidden API\n\n* No need to check taskId as AbstractTask already did\n\n* Rename Shuffle method\n\n* Split method that preserves empty tokens\n\n* Update docs/configuration/index.md\n\nCo-authored-by: Copilot \u003c175728472+Copilot@users.noreply.github.com\u003e\n\n* Address co-pilot comments\n\n---------\n\nCo-authored-by: Copilot \u003c175728472+Copilot@users.noreply.github.com\u003e"
    },
    {
      "commit": "aea371bc1b046791301b735f8aeb34d67f04bbba",
      "tree": "2f44b07ac205c56f3decae21010749dbe9901e09",
      "parents": [
        "cef1d031759a96132345c1a8587b6f19aa1e3b25"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Fri Mar 27 21:16:42 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 27 21:16:42 2026 -0700"
      },
      "message": "refactor: remove InputRowParser implementations (#19206)"
    },
    {
      "commit": "cef1d031759a96132345c1a8587b6f19aa1e3b25",
      "tree": "1c5ddd99a0661bcdc3badfb387093fd3f640358c",
      "parents": [
        "be58522c858e51649dd026e886c2e9c03463ed13"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Fri Mar 27 14:48:10 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 27 14:48:10 2026 -0700"
      },
      "message": "add hold metrics to StorageMonitor, fix some issues with StorageLocation (#19217)"
    },
    {
      "commit": "be58522c858e51649dd026e886c2e9c03463ed13",
      "tree": "4ae42d6141b479a0a106efcb87209e48f93c3e17",
      "parents": [
        "f5ec8be97318db0cc33dbb2473c8a075c30fa617"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 27 13:22:01 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 27 13:22:01 2026 -0700"
      },
      "message": "fix: Improved error handling in SeekableStreamIndexTaskRunner. (#19218)\n\nThe main improvement is that \"persist\" is moved out of a finally block,\nand now only happens on the normal path. This has two benefits. First,\nthere is no point in persisting on the error path, and the in-memory\nindex might be in a bad state anyway at that point. Second, moving the\npersist call out of \"finally\" fixes an issue where an exception thrown\nfrom \"persist\" would cause an exception thrown from \"add\" to be lost.\n\nThis can come up in production when the in-memory index grows too large,\ncausing the main code to throw an OutOfMemoryError, and then something\ngoes wrong with the persist too. In this situation the original\nOutOfMemoryError would not have been logged.\n\nA secondary improvement is that we catch Throwable rather than Exception\nto trigger cleanup and when handling errors that occur during cleanup.\nThis ensures we don\u0027t miss cleanup tasks when an Error is thrown by\nthe main code, and that we don\u0027t lose the original exception if an Error\nis thrown by the cleanup code."
    },
    {
      "commit": "f5ec8be97318db0cc33dbb2473c8a075c30fa617",
      "tree": "c638fdafc45a328951447d95123041aa13476a94",
      "parents": [
        "b2eed67a2c5c93934915e4890ff92c7586fa7a5e"
      ],
      "author": {
        "name": "jtuglu1",
        "email": "jtuglu@netflix.com",
        "time": "Fri Mar 27 09:35:15 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 27 09:35:15 2026 -0700"
      },
      "message": "perf: Submit tasks to TaskRunner order by task priority (#19203)\n\nEnsure active tasks are (re)-submitted to TaskRunner in order of priority. With the way things are currently structured, TaskQueue and TaskRunner both introduce their own delays to the scheduling of a task as they are both queues in their own ways. This patch attempts to minimize the HOL-blocking delay introduced by the TaskQueue and #18851 will help reduce the slowness on the TaskRunner. This helps enormously for large task volume cases (O(5k) tasks+) where lots of low-priority tasks are hitting the queue (think large compaction or batch volume) while realtime indexing tasks are being submitted. This allows the higher-priority realtime tasks to \"jump\" the line in submission to the runner (which is still ultimately FIFO relatively speaking), and not pay the O(XX) seconds of time wasted waiting for other lower-priority tasks to be submitted to the queue.\n\nNotably, this does do a sort every time startPendingTasksOnRunner is called, however, given the activeTasks should be on the order of O(10k) tasks and the comparator is comparing integers, this should be relatively cheap (empirically, this has never shown up in flamegraph as TaskQueue is bound by other things). When #18851 is merged (and priority based running is introduced) this may have less of an effect. Storing activeTasks in an ordered container might be nice, but given this is a single use-case which requires a sorted order (and the maximum container size is tolerable) I opted to go for this approach."
    },
    {
      "commit": "b2eed67a2c5c93934915e4890ff92c7586fa7a5e",
      "tree": "688f97907aba1dbbf9fdb51e2e9a4b9a25c9518f",
      "parents": [
        "d22f28277fa79a46969e4ad8ad91bff3c756d28d"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Thu Mar 26 09:04:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 26 09:04:11 2026 -0700"
      },
      "message": "feat: Combiner for MSQ groupBys. (#19193)\n\nThis patch adds optional row combining during merge-sort for MSQ groupBy.\n\nWhen enabled using the \"useCombiner\" parameter (default: false),\nFrameChannelMerger detects adjacent rows with identical sort keys and\ncombines them using a FrameCombiner provided by the query logic.\n\nMain files:\n\n1) FrameCombiner: stateful row combiner interface.\n\n2) FrameChannelMerger: logic for determining which rows to combine.\n\n3) GroupByFrameCombiner: implementation for the groupBy query."
    },
    {
      "commit": "d22f28277fa79a46969e4ad8ad91bff3c756d28d",
      "tree": "8e9f19c38e7b3861bdb33c78a24d1e695d5958ff",
      "parents": [
        "2fde1f5f970856ae7fc0120cdf64e10382563761"
      ],
      "author": {
        "name": "Virushade",
        "email": "phuaguanwei99@gmail.com",
        "time": "Thu Mar 26 15:11:59 2026 +0800"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 26 15:11:59 2026 +0800"
      },
      "message": "feat: Native Minor compaction (#19016)\n\n* Test Driven Dev\n\nCompactionTaskTest\n\nTaskLockHelperTest\n\n* Minor Compaction Impl\n\n* Deprecated fixes\n\n* Check minor compaction\n\n* Minor compaction impl\n\n* Documentations\n\n* Tidy up segmentProvider\n\n* We no longer need the context key\n\n* Bug fixes\n\n* Trim up unnecessary code\n\n* Docs and stylecheck\n\n* Checkstyle\n\n* Documentations first\n\n* Integrate minor compaction spec\n\n* Checkstyle\n\n* Docs\n\n* Tidy up changes\n\n* Revert changes to SpecificSegmentsSpec\n\n* Spell check\n\n* Force time chunk lock only for minor compaction\n\n* Docs\n\n* Minor compaction test\n\n* Native minor compaction add in default values\n\n* Junit5 for TaskLockHelperTest\n\n* ImmutableXXX.of() -\u003e XXX.of() methods in Test\n\n* Validate minor compaction task configs\n\n* Compaction IO Config creation refactor\n\n* Validations\n\n* Test fixes\n\n* Remove test asserting finding segments to lock\n\n* Validation tests\n\n* Rename uncompacted -\u003e minor compaction\n\n* Checkstyle\n\n* Solve validation errors in InputSpecTest\n\n* Change name to use \u0027minor\u0027 for compaction\n\n* Minor compaction input spec fix\n\n* Revert unintended changes in Druid.xml\n\n* Remove uncompacted spellcheck exclusion\n\n* Minor compaction constructor should validate the lock\n\n* Nitpicked test name change\n\n* Remove unnecessary validation\n\n* Partial Compaction Testing\n\n* Checkstyle\n\n* Apply suggestion from @kfaraz\n\nCo-authored-by: Kashif Faraz \u003ckashif.faraz@gmail.com\u003e\n\n* Change ImmutableMap.of moments to Map.of\n\n---------\n\nCo-authored-by: Kashif Faraz \u003ckashif.faraz@gmail.com\u003e"
    },
    {
      "commit": "2fde1f5f970856ae7fc0120cdf64e10382563761",
      "tree": "60405898e7c22b94ed73d5f1c848022c3b9dec15",
      "parents": [
        "fb3eb677ea0b1e54e3336257d77e76a676384ec1"
      ],
      "author": {
        "name": "Zoltan Haindrich",
        "email": "kirk@rxd.hu",
        "time": "Thu Mar 26 07:18:56 2026 +0100"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 26 07:18:56 2026 +0100"
      },
      "message": "test: improve CI stability (#19207)"
    },
    {
      "commit": "fb3eb677ea0b1e54e3336257d77e76a676384ec1",
      "tree": "8b5b2d08b5da5d7eab1f322004f3035bae62eced",
      "parents": [
        "2978d6af699df2cac4ae37d6d0c8e15679b809dd"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Wed Mar 25 22:20:07 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 25 22:20:07 2026 -0500"
      },
      "message": "refactor: CascadingReindexingTemplate Refactoring (#19106)\n\n* Refactor ReindexingRule types and concepts\n\n* Close test coverage gaps\n\n* Remove some repetitive validation from the virtual column merge in reindexing config builder\n\n* Eagerly fail with clear user facing error if an invalid spec with partitionsSpec in tuning config is submitted"
    },
    {
      "commit": "2978d6af699df2cac4ae37d6d0c8e15679b809dd",
      "tree": "43b96346b1c8ec0e70ceea45258b71a8b3ab7ca9",
      "parents": [
        "5f77596d42bc38173dbff3a491d9a035094d9d6c"
      ],
      "author": {
        "name": "Ben Smithgall",
        "email": "ben.smithgall@gmail.com",
        "time": "Wed Mar 25 21:03:03 2026 -0400"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 25 18:03:03 2026 -0700"
      },
      "message": "add support for GCS warehouses in iceberg (#19137)\n\nAdd `GoogleCloudStorageInputSourceFactory` to allow reading Iceberg data\nfiles from `gs://` paths with `\"warehouseSource\": \"google\"`. Add\n`iceberg-gcp` and `google-cloud-storage` dependencies to iceberg\nextension."
    },
    {
      "commit": "5f77596d42bc38173dbff3a491d9a035094d9d6c",
      "tree": "5da4ddc211c1beabe2e3e4968ad286e76b574c4c",
      "parents": [
        "0bccfb7f449cd8f953b5cc6c86a17509acba5c3f"
      ],
      "author": {
        "name": "Cece Mei",
        "email": "yingqian.mei@gmail.com",
        "time": "Wed Mar 25 11:53:28 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 25 11:53:28 2026 -0700"
      },
      "message": "feat: adds row-based compaction eligibility filtering  (#19205)\n\n* rows\n\n* document\n\n* zero\n\n* nullable\n\n* spell"
    },
    {
      "commit": "0bccfb7f449cd8f953b5cc6c86a17509acba5c3f",
      "tree": "c84223e98e1cdef2a7516fefbc16d37781359554",
      "parents": [
        "7bdd71335c4bfb659d3b0458136f0f4eeef7d02e"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 24 14:28:00 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 24 14:28:00 2026 -0700"
      },
      "message": "fix: getOnlyNonBroadcastInputAsStageId should ignore non-stage inputs. (#19201)\n\nThis is a helper in ControllerQueryKernelUtils that helps the controller\ndetermine whether a stage can run in MEMORY output mode. The helper\nwas erroneously considering all non-broadcast inputs, when really\nit should only consider non-broadcast stage-typed inputs."
    },
    {
      "commit": "7bdd71335c4bfb659d3b0458136f0f4eeef7d02e",
      "tree": "3d1f3323f44a6f3f6b4115c1ee66de6cbd53f6bd",
      "parents": [
        "619a61f779b55f3ccb65cd07bd7cd5792fbaf5f3"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 24 07:23:30 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 24 09:23:30 2026 -0500"
      },
      "message": "fix: Use correct icon for realtime queries. (#19200)\n\nPR #19196 was meant to use a bottom-left icon for realtime queries,\nnot a bottom-right icon."
    },
    {
      "commit": "619a61f779b55f3ccb65cd07bd7cd5792fbaf5f3",
      "tree": "8c7e29fa4f8b5066c5a7d05eacecc3ac1e3a4a0c",
      "parents": [
        "4bd73dab5ffd96b5ab3f1858c5eeaed7a24315e8"
      ],
      "author": {
        "name": "jtuglu1",
        "email": "jtuglu@netflix.com",
        "time": "Mon Mar 23 23:14:46 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 23 23:14:46 2026 -0700"
      },
      "message": "perf: More TaskQueue optimizations (#19199)\n\nSmall change. Reduces CPU footprint on task scheduling path by acquiring key (or range)-based lock over tasks map (instead of acquiring giant lock + copying/filtering task list to check a single value)."
    },
    {
      "commit": "4bd73dab5ffd96b5ab3f1858c5eeaed7a24315e8",
      "tree": "35db71ab6b4ade9fcb05d92c35379521a8df4363",
      "parents": [
        "27a8fc30477713dff82a2daeeb476a3615fa0d88"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 23 21:56:58 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 23 21:56:58 2026 -0700"
      },
      "message": "refactor: migrate integration-tests-ex/tools into embedded-tests (#19175)\n\nchanges:\n* moved useful classes from integration-tests-ex/tools into embedded-tests, deleted integration-tests-ex/\n* remove some unused github actions scripts\n* mark all dependencies of embedded-tests as test scope\n* set enforcer to ignore test scope for requireUpperBoundDeps in embedded-tests"
    },
    {
      "commit": "27a8fc30477713dff82a2daeeb476a3615fa0d88",
      "tree": "45910e58e8754e517bd754f9d994748f11368a1a",
      "parents": [
        "0a3ad942030090b8fe5680ee8bda6d4fa103c7e6"
      ],
      "author": {
        "name": "Jay Kanakiya",
        "email": "jaykanakiya42@gmail.com",
        "time": "Mon Mar 23 14:56:22 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 23 14:56:22 2026 -0700"
      },
      "message": "minor: change maxStringLength default (#19198)\n\nFollow up to #19146\n\n\nUpdated the default value for druid.indexing.formats.maxStringLength to null from 0. This change also included documentation update for the same."
    },
    {
      "commit": "0a3ad942030090b8fe5680ee8bda6d4fa103c7e6",
      "tree": "8148f9cadfe97701d8795873590074a65dd3f9bb",
      "parents": [
        "534dd2bf36aa6c760ba0251f87e6abc4c73eec7c"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 23 12:30:33 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 23 12:30:33 2026 -0700"
      },
      "message": "feat: Add counters, remove warehouse from DataServerQueryHandlers (#19196)\n\nTwo changes to MSQ DataServerQueryHandlers:\n\n1) Add \"queries\" and \"totalQueries\" counters, reflecting queries made\n   to realtime servers to get realtime data.\n\n2) Remove dependency on QueryToolChestWarehouse and QueryToolChest.\n   When mapping functions from these are needed, the query logic passes\n   it in rather than it being derived from the toolchest.\n\nWeb console changes are included for (1). Realtime query counters are\nshown in an ↙ icon next to VSF counters."
    },
    {
      "commit": "534dd2bf36aa6c760ba0251f87e6abc4c73eec7c",
      "tree": "5eacd6ce7e1d3f41c6ee66503e4c84104af292c0",
      "parents": [
        "71862170ac31386e05f68406b9aee7b32be8c86f"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Sun Mar 22 22:00:38 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Sun Mar 22 22:00:38 2026 -0700"
      },
      "message": "docs: Default isolation.level is read_committed. (#19194)\n\nIn KafkaRecordSupplier, we set isolation.level to read_committed if\nthe user has not explicitly set it to something else. This patch\nupdates kafka-ingestion.md to reflect that."
    },
    {
      "commit": "71862170ac31386e05f68406b9aee7b32be8c86f",
      "tree": "cc2ccbe5f625141d7b0320006d4e7e35642293ba",
      "parents": [
        "0c6a1da299bf14ab352976d8954ab51192c667b9"
      ],
      "author": {
        "name": "jtuglu1",
        "email": "jtuglu@netflix.com",
        "time": "Fri Mar 20 20:59:15 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 20:59:15 2026 -0700"
      },
      "message": "feat: add dynamic default query context (#19144)\n\nAdd dynamic default query context to brokers. Allows operators to quickly tune the default setting for query context values without requiring a re-deployment or forcing upstream clients to update their queries."
    },
    {
      "commit": "0c6a1da299bf14ab352976d8954ab51192c667b9",
      "tree": "875f9516b4bf6c46fd2205baa1311c7c3b6fea5f",
      "parents": [
        "91e7ec4402290e9431b0b223857a1c0f460279de"
      ],
      "author": {
        "name": "Jay Kanakiya",
        "email": "jaykanakiya42@gmail.com",
        "time": "Fri Mar 20 14:23:02 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 14:23:02 2026 -0700"
      },
      "message": "feat: Add configurable truncation for string columns (#19146)\n\n* support configurable truncation for string columns\n\n* add spelling for checks\n\n* add validations and update tests\n\n* trigger build\n\n* add single value mvd test and doc update\n\n* Update docs/ingestion/ingestion-spec.md\n\nCo-authored-by: aho135 \u003candrewho135@gmail.com\u003e\n\n---------\n\nCo-authored-by: aho135 \u003candrewho135@gmail.com\u003e"
    },
    {
      "commit": "91e7ec4402290e9431b0b223857a1c0f460279de",
      "tree": "6466b8544fd3cfd744aaf4f6a82da94027a7440e",
      "parents": [
        "30c8b337b777212734d2e223395a490a040af5b3"
      ],
      "author": {
        "name": "zhan7236",
        "email": "76658920+zhan7236@users.noreply.github.com",
        "time": "Sat Mar 21 03:21:40 2026 +0800"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 12:21:40 2026 -0700"
      },
      "message": "Remove redundant TopNQueryConfig#minTopNThreshold (#18790)\n\nThis removes the minTopNThreshold config from TopNQueryConfig as it\ncan be overridden by the query context variable. The default value is\nstill available as a constant TopNQueryConfig.DEFAULT_MIN_TOPN_THRESHOLD."
    },
    {
      "commit": "30c8b337b777212734d2e223395a490a040af5b3",
      "tree": "2349d6757cbf546e03e2470c62cf3945c0636f40",
      "parents": [
        "47bcc3f9df129aaeeab7975724a5835f0fbb5647"
      ],
      "author": {
        "name": "jtuglu1",
        "email": "jtuglu@netflix.com",
        "time": "Fri Mar 20 09:33:57 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 09:33:57 2026 -0700"
      },
      "message": "dev: create convention commit syntax (#19089)\n\nThis change ensures commits to master have their titles conform to the conventional commit syntax, which helps with easily identifying bugfixes, breaking changes, etc. and auto-generation of things like release notes."
    },
    {
      "commit": "47bcc3f9df129aaeeab7975724a5835f0fbb5647",
      "tree": "a4175d89e4236d32d34947904d8a0db440f8876e",
      "parents": [
        "4b082a53db0db5edd0d916a598a110de8b05d3ab"
      ],
      "author": {
        "name": "Kashif Faraz",
        "email": "kashif.faraz@gmail.com",
        "time": "Fri Mar 20 21:05:25 2026 +0530"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:35:25 2026 -0700"
      },
      "message": "Add new metric `ingest/rows/published` to fix flaky `KinesisFaultToleranceTest` (#19177)"
    },
    {
      "commit": "4b082a53db0db5edd0d916a598a110de8b05d3ab",
      "tree": "f6724c8c92d1c8fa17e1f4d78a943a64dcd4b54c",
      "parents": [
        "d383fac0b692a04b20280891096dbf200594f38d"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 20 08:32:03 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:32:03 2026 -0700"
      },
      "message": "MSQ: Get row count for counters before mapping. (#19186)\n\nMapped segments generally do not have a PhysicalSegmentInspector, so\ngetting the segment row count after mapping often leads to zero."
    },
    {
      "commit": "d383fac0b692a04b20280891096dbf200594f38d",
      "tree": "27b451ff1b340a6939e53965258599fe244919dd",
      "parents": [
        "f9f0759c5e4fdcce646ec719a7602a17436a54ad"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 20 08:31:37 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:31:37 2026 -0700"
      },
      "message": "Console: Show Dart sqlQueryId in details pane. (#19185)\n\nShow both the unique execution ID (dartQueryId) and the potentially\nsqlQueryId. This is useful when the sqlQueryId is user-provided and\nmeaningful, and is also useful for correlating with SQL query logs."
    },
    {
      "commit": "f9f0759c5e4fdcce646ec719a7602a17436a54ad",
      "tree": "16e46f8e19bb9ad5b3787005de2afb390fb98b97",
      "parents": [
        "844f4fa04bbabb80ef29b005f26efde3a2c85317"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 20 08:30:39 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:30:39 2026 -0700"
      },
      "message": "Console: Report workers active with any interesting activity. (#19183)\n\nCurrently workers are reported as active in the web console if they\nreport nonzero rows for any channel. However, because segment input\nrows are typically reported when the segment is done processing,\nworkers that just started can be active even when they have not yet\nreported rows.\n\nThis patch adjusts the logic such that any nonzero rows, files, bytes,\nframes, or wall time is enough to consider a worker active."
    },
    {
      "commit": "844f4fa04bbabb80ef29b005f26efde3a2c85317",
      "tree": "85531e770bd80ace459ca2146454c3319c86f494",
      "parents": [
        "a5dad129e95cdea9d661b1700f26f7b6d0cc0632"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 20 08:30:20 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:30:20 2026 -0700"
      },
      "message": "Console: Improve cancellation in current-dart-panel. (#19182)\n\nTwo improvements:\n\n1) Only show \"Cancel query\" if the query is ACCEPTED or RUNNING.\n\n2) Cancellation can sometimes return 404 even when successful if there\n   are multiple Brokers. It can also return 404 if the query finished\n   before it could be canceled. Neither of these really deserve a\n   \"danger\" toast. Change to a success toast with text \"Query canceled\n   or no longer running\"."
    },
    {
      "commit": "a5dad129e95cdea9d661b1700f26f7b6d0cc0632",
      "tree": "4fa8def885bc17269ad4f4b24bf451743f0e6c95",
      "parents": [
        "4dccad7fb000461234d69ee98870a90671e894d8"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 20 08:29:53 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 20 08:29:53 2026 -0700"
      },
      "message": "Web console: Add test-unit script. (#19184)\n\nThe \"test\" script runs the full script/build, which takes a while\nand isn\u0027t necessary. It also runs e2e tests, which require a running\nDruid service, which is not always going to be available.\n\nThis patch adds a \"test-unit\" script that builds SQL docs only, not\nthe full webpack bundle, and runs unit tests only."
    },
    {
      "commit": "4dccad7fb000461234d69ee98870a90671e894d8",
      "tree": "f4bfabfa885127e2fd08aa98d25bc45948099b0f",
      "parents": [
        "a126e2bfa46e6da19b3fe3966fe23b8d185d7acc"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Thu Mar 19 11:09:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 19 11:09:11 2026 -0700"
      },
      "message": "add SQL parameter values in RequestLogLine so they can be logged/emitted (#19067)"
    },
    {
      "commit": "a126e2bfa46e6da19b3fe3966fe23b8d185d7acc",
      "tree": "c8b98d606113dee9b4822dcfa45821b6ec0f51b3",
      "parents": [
        "926cffb1302141cf25fd990064fdff99c34b1271"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Thu Mar 19 11:08:10 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 19 11:08:10 2026 -0700"
      },
      "message": "Add support for MSQ CLUSTERED BY expressions to be preserved in the segment shard spec as virtual columns (#19061)\n\nchanges:\n* `ShardSpec` interface has a new method, `getDomainVirtualColumns` to provide the virtual column information for pruning\n* `DimensionRangeShardSpec` stores `VirtualColumns` in segment metadata so they can be compared to query expressions and be used for pruning\n* `FilterSegmentPruner` is virtual column aware for segment pruning using the new methods\n* `SegmentGeneratorStageProcessor` now contains a map of column name to `VirtualColumn` alongside, to support cluster key columns being virtual columns\n* `ControllerImpl` persists clustering virtual columns in compaction state in the transform spec\n* `MSQCompactionRunner` handles virtual columns in order-by/cluster-by for compaction"
    },
    {
      "commit": "926cffb1302141cf25fd990064fdff99c34b1271",
      "tree": "8c154319dbdc0a1668257f141eebccc8831b2738",
      "parents": [
        "d159e381f8aab1f3f49958047a18964ac8009a4a"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Thu Mar 19 10:55:29 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 19 10:55:29 2026 -0700"
      },
      "message": "add software.amazon.awssdk:sts to aws-common as runtime scope dependency so WebIdentityTokenProvider works correctly (#19178)"
    },
    {
      "commit": "d159e381f8aab1f3f49958047a18964ac8009a4a",
      "tree": "62a65a1da507a27e8c8705ac663880575f509222",
      "parents": [
        "15e98ba04b9012917d784fb6744b3291a837510e"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Thu Mar 19 12:47:30 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 19 10:47:30 2026 -0700"
      },
      "message": "Fix loss loss of cross region bucket read config for s3 client in the v2 migration (#19180)"
    },
    {
      "commit": "15e98ba04b9012917d784fb6744b3291a837510e",
      "tree": "c6f8dcd081324aa8f298faf5641bc74f10a2101d",
      "parents": [
        "fa55b6b94093935b0a73e0598f37d302c6ec4579"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Wed Mar 18 16:57:21 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 18 14:57:21 2026 -0700"
      },
      "message": "Fix service discovery bug in `kubernetes-extensions` (#19139)"
    },
    {
      "commit": "fa55b6b94093935b0a73e0598f37d302c6ec4579",
      "tree": "fcad5a4e0bf3ece3bc858fa0a8df97847176f8c5",
      "parents": [
        "a7dae26cf6d9ac64ef08c4fa6509cef7f8a4973b"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Wed Mar 18 11:20:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 18 11:20:11 2026 -0700"
      },
      "message": "remove parser/parserMap from DataSchema (#19173)"
    },
    {
      "commit": "a7dae26cf6d9ac64ef08c4fa6509cef7f8a4973b",
      "tree": "b962aedb928f57f53be89ac568b064f367909b15",
      "parents": [
        "eb694b075190e8edb62afb717f1a61ffabd9c52e"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Wed Mar 18 09:12:33 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 18 09:12:33 2026 -0700"
      },
      "message": "MSQ: Add workerDesc to WorkerStats. (#19171)\n\nThe existing field workerId is designed for MSQ itself to uniquely\nidentify a worker. The new field workerDesc is for users (and the web\nconsole) to more easily identify where a worker is running. Its javadoc\nspecs it as either a task ID or a host:port.\n\nThis patch also updates the web console to use workerDesc, if available,\nto show the task (with link to the tasks view) or show the server. This\nreplaces some older, more brittle logic that was constructing a task ID\nfrom the query ID and worker number. The approach in this patch better\nhandles task relaunches, in which case the worker number -\u003e task ID\nmapping changes."
    },
    {
      "commit": "eb694b075190e8edb62afb717f1a61ffabd9c52e",
      "tree": "45da344e7b8c3d1e303ef6d77fa6e71ce9026fb6",
      "parents": [
        "fef695115d4c27cc7658b9a31a73ac0cefbf2c9a"
      ],
      "author": {
        "name": "Ben Smithgall",
        "email": "ben.smithgall@gmail.com",
        "time": "Wed Mar 18 10:10:29 2026 -0400"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Wed Mar 18 09:10:29 2026 -0500"
      },
      "message": "upgrade to iceberg 1.7.2 (#19172)"
    },
    {
      "commit": "fef695115d4c27cc7658b9a31a73ac0cefbf2c9a",
      "tree": "8414991e03a54a6af4e1875fe9a8a6a7d5b3f1d3",
      "parents": [
        "e1dbc749d10a130b68166f65266e64616700de1e"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Tue Mar 17 22:37:40 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 22:37:40 2026 -0700"
      },
      "message": "allow Collection for VirtualColumns.create and tidy up callers (#19174)"
    },
    {
      "commit": "e1dbc749d10a130b68166f65266e64616700de1e",
      "tree": "d5e1f23ad30fd14e11f5c85274f2a55d950492f0",
      "parents": [
        "746cae6bf1261f401904400d0645eac3050a341b"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 17 21:43:46 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 21:43:46 2026 -0700"
      },
      "message": "Improved ScheduledExecutors behavior and metrics. (#19168)\n\nMain changes:\n\n1) Fix scheduleAtFixedRate to schedule the next run after the prior\n   run completes, rather than before it starts. The previous logic could\n   lead to the callable running concurrently with itself when the\n   scheduled executor is multi-threaded.\n\n2) Align scheduleAtFixedRate and scheduleWithFixedDelay so both of them\n   re-schedule when the callable throws an exception. Previously\n   scheduleAtFixedRate did re-schedule but scheduleWithFixedDelay did not.\n\n3) Add DelayMetricEmittingScheduledExecutorService which emits a metric\n   for scheduling delay. This helps operators understand if a particular\n   scheduled executor is too overloaded to run tasks on time.\n\n4) Add scheduling delay metrics: overlord/duty/wait/millis,\n   coordinator/duty/wait/millis, and ingest/reporting/wait/millis."
    },
    {
      "commit": "746cae6bf1261f401904400d0645eac3050a341b",
      "tree": "eb093b1572f9fc0b8d765325b550fdd78b8c9f75",
      "parents": [
        "01abc30bea7d2820554fbcb73187491f8ea92c17"
      ],
      "author": {
        "name": "Abhishek Radhakrishnan",
        "email": "abhishek.rb19@gmail.com",
        "time": "Tue Mar 17 20:42:00 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 20:42:00 2026 -0700"
      },
      "message": "Preserve row signature column order when column analysis has errors (#19162)\n\n* Fix https://github.com/apache/druid/issues/18437\n\nWhen column analysis encounters errors during fold, the current behavior can cause row signatures to flap on the Brokers, which in turn leads to sporadic query failures or incorrect query results, since query plans rely on the Broker’s segment metadata cache. This issue is more pronounced during segment analysis on realtime servers with JSON columns, where the fold may sometimes produce column analysis errors, presumably due to type coercion.\n\nThis patch ensures that columns are not skipped when such errors occur preserving the row signature\u0027s order.\n\nNote: https://github.com/apache/druid/issues/19176 may still occur, where the current behavior is that types will fall back to string when such errors are encountered."
    },
    {
      "commit": "01abc30bea7d2820554fbcb73187491f8ea92c17",
      "tree": "407bdca51aa51197c67d0d8200532a610320cf1d",
      "parents": [
        "f9d8ef936c6a963868850092c3f9a2d3435d72db"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Tue Mar 17 13:11:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 13:11:11 2026 -0700"
      },
      "message": "remove support for hadoop ingestion (#19109)\n\nchanges:\n* removes support for hadoop ingestion and all associated hadoop specific extension functionality (materialized view extensions, hadoop parsers for avro, parquet, etc)\n* adds hadoop task stub for serde and logging to indicate that hadoop ingestion support has been removed\n* updates hadoop docs to indicate it is removed, redirect other hadoop specific doc pages\n* remove legacy integration tests too since the only remaining tests were hadoop ingestion tests"
    },
    {
      "commit": "f9d8ef936c6a963868850092c3f9a2d3435d72db",
      "tree": "b79434b18b5913a268f0ec474fc271852d1eb7ba",
      "parents": [
        "7913d036acf1c350688a0c63eb411e4d9cc890b0"
      ],
      "author": {
        "name": "Cece Mei",
        "email": "yingqian.mei@gmail.com",
        "time": "Tue Mar 17 11:45:32 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 11:45:32 2026 -0700"
      },
      "message": "fix flaky compaction test (#19157)\n\n* test-flaky\n\n* processed\n\n* batch-ingest\n\n* flaky\n\n* flaky\n\n* comment"
    },
    {
      "commit": "7913d036acf1c350688a0c63eb411e4d9cc890b0",
      "tree": "ad95f14de1cce027dfb07f5f79ab73da30d98801",
      "parents": [
        "1d648968a1d3604958aa26dd51b00e2013add8e2"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Tue Mar 17 08:54:10 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 10:54:10 2026 -0500"
      },
      "message": "MSQ: Restore memory-access optimization during sorting. (#19170)\n\nDue to a mistake in #18181, the direct row-memory-access optimization\nwas lost for outputs of FrameChannelHashPartitioner and\nFrameChannelMerger. This patch restores it, which should speed up\nhash partitioning, range partitioning, and sorting in general."
    },
    {
      "commit": "1d648968a1d3604958aa26dd51b00e2013add8e2",
      "tree": "0f9865dbb6e304ffaf13ed1cca5812edf8b2c45e",
      "parents": [
        "068e5720dba1c5e1c8528008250e6686d2062767"
      ],
      "author": {
        "name": "Guowei Wu",
        "email": "185086661@qq.com",
        "time": "Tue Mar 17 21:45:43 2026 +0800"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 21:45:43 2026 +0800"
      },
      "message": "Fix negative Kafka partition lag caused by inconsistent current/latest offsets (#18750)\n\n* Fix negative Kafka partition lag caused by inconsistent current/latest offsets\n\n* Address review comments\n\n* Fix stale Javadoc and brittle tests after removing latestSequenceFromStream"
    },
    {
      "commit": "068e5720dba1c5e1c8528008250e6686d2062767",
      "tree": "f63e62997080c312ef3b0b9e6e28d9ca183e390d",
      "parents": [
        "2956e98e2d5bcc6e780c7bc313cca4ebffa6a51f"
      ],
      "author": {
        "name": "Kashif Faraz",
        "email": "kashif.faraz@gmail.com",
        "time": "Tue Mar 17 15:15:32 2026 +0530"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Tue Mar 17 15:15:32 2026 +0530"
      },
      "message": "Improve extensibility of MSQ Dart engine via extensions (#19127)\n\nThis is a collection of minor refactors that help extensions use and override certain Dart capabilities.\n\nChanges:\n- Add utility method `ServletResourceUtils.createAsyncTimeoutListener`\n- Track idle duration of a Dart worker\n- Fix visibility of some methods and classes\n- Add equals and hashCode for `ChangeRequestHistory.Counter`\n- Add `SegmentListerResourceTest`\n- Add test method to `ServletResourceUtilsTest`"
    },
    {
      "commit": "2956e98e2d5bcc6e780c7bc313cca4ebffa6a51f",
      "tree": "7792ad1ca966ee6901bc58ca105f2e1a38a6ef6b",
      "parents": [
        "da6c02ecbbdc86733cc835339cafa0c3bc24e4eb"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 16 23:28:26 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 23:28:26 2026 -0700"
      },
      "message": "migrate hdfs integration tests to embedded-tests (#19158)"
    },
    {
      "commit": "da6c02ecbbdc86733cc835339cafa0c3bc24e4eb",
      "tree": "089916853a435a78b3c3899926dbf5858e6deb63",
      "parents": [
        "15b4003ac4b3318eccc1ef085e69bc56d3af25f1"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 16 23:24:29 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 23:24:29 2026 -0700"
      },
      "message": "remove deprecated InputRowParser from SeekableStreamIndexTaskRunner (kafka, kinesis, etc) (#19166)"
    },
    {
      "commit": "15b4003ac4b3318eccc1ef085e69bc56d3af25f1",
      "tree": "18155c0e4ef0e6759a06937e8c98813c383b0ded",
      "parents": [
        "776f0ee5c8896bf437c5ddeba4493e5e9c100443"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 16 22:22:00 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 22:22:00 2026 -0700"
      },
      "message": "Add durationMs to Dart query reports. (#19169)"
    },
    {
      "commit": "776f0ee5c8896bf437c5ddeba4493e5e9c100443",
      "tree": "9ad227d5cd1cd97938a26cc0ab36f3930fa8fd38",
      "parents": [
        "85b359729598805b5cee2bcfae6c2b95c8af2bc2"
      ],
      "author": {
        "name": "Andreas Maechler",
        "email": "amaechler@gmail.com",
        "time": "Mon Mar 16 21:48:36 2026 -0600"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 20:48:36 2026 -0700"
      },
      "message": "Update version to include project prefix (#19167)"
    },
    {
      "commit": "85b359729598805b5cee2bcfae6c2b95c8af2bc2",
      "tree": "3deb406f4f22ed94bb6728c1117a74fb2729bafe",
      "parents": [
        "288336b4f26a4c973a263b0754583db5cb412d1c"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Mon Mar 16 14:03:02 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 14:03:02 2026 -0500"
      },
      "message": "Update easymock, mockito and equalsverifier to latest (#19145)\n\n* update some testing scope dependencies\n\n* work on getting tests inline with new extensions\n\n* Fix a few more equalsverifier tests for new version\n\n* fixup using import for an equalsverifier dep"
    },
    {
      "commit": "288336b4f26a4c973a263b0754583db5cb412d1c",
      "tree": "caeb3fd0651be6a5c5d40d96ebb4329b3495a865",
      "parents": [
        "dc1c4d927371f77d9edcff59ef1b655421030b97"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 16 10:58:26 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 10:58:26 2026 -0700"
      },
      "message": "MSQ: Extend test worker wait timeout to 30s. (#19165)\n\nIn CI sometimes we hit this 10s timeout on MSQDrillWindowQueryTest.\nLooking at successful runs, it can indeed take a few seconds sometimes.\nExtending the timeout to 30s should be enough allowance for potentially\nslow CI."
    },
    {
      "commit": "dc1c4d927371f77d9edcff59ef1b655421030b97",
      "tree": "123540fa36629171000fe02e99b85af37c8eb553",
      "parents": [
        "226f2772afdc51c2dc03e4562199f97d948b33e4"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Mon Mar 16 10:55:49 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 10:55:49 2026 -0700"
      },
      "message": "Fix stop race in MonitorScheduler. (#19161)\n\nWhen MonitorScheduler \"stop\" is called, it in turn calls \"removeMonitor\".\nThis calls \"monitor\" on all monitors. The whole process is synchronized\non the MonitorScheduler itself, but it is not synchronized with the\nregularly-scheduled calls to \"monitor\". It can lead to \"monitor\" running\nconcurrently with itself, which can lead to unpredictable behavior in\nmetrics emission.\n\nOne likely effect of this would be to emit metrics twice, due to the\ncommon pattern of snapshot -\u003e emit deltas from previous snapshot -\u003e\nsave snapshot.\n\nThis patch also removes the unused CompoundMonitor."
    },
    {
      "commit": "226f2772afdc51c2dc03e4562199f97d948b33e4",
      "tree": "da7b4ecc3576325cb6bc73e5c523f3e5d0e0f5e3",
      "parents": [
        "7f001d8a09ef2e434f184e1b046814d3d4db4615"
      ],
      "author": {
        "name": "Kashif Faraz",
        "email": "kashif.faraz@gmail.com",
        "time": "Mon Mar 16 22:27:14 2026 +0530"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 09:57:14 2026 -0700"
      },
      "message": "Emit metric dimension for minor compaction and other refactors (#19151)"
    },
    {
      "commit": "7f001d8a09ef2e434f184e1b046814d3d4db4615",
      "tree": "0fe11fd791572db746cc8afebc6ae69113fb6698",
      "parents": [
        "ae9f51b8289da5d64a887cd49c8207b5bc41ea74"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Mon Mar 16 09:22:11 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 09:22:11 2026 -0700"
      },
      "message": "fix vsf bug when restoring entries on a failed reclaim (#19160)"
    },
    {
      "commit": "ae9f51b8289da5d64a887cd49c8207b5bc41ea74",
      "tree": "db7921fd8ea5da07cb13ac760ad122e7a2797e39",
      "parents": [
        "98312214dcae724d1bb573eba4f8449d8c5f2fa2"
      ],
      "author": {
        "name": "Ben Smithgall",
        "email": "ben.smithgall@gmail.com",
        "time": "Mon Mar 16 00:20:03 2026 -0400"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 09:50:03 2026 +0530"
      },
      "message": "Add embedded-tests for iceberg extension (#19143)\n\nChanges:\n- Add a new `IcebergRestCatalogResource` which uses the `iceberg-rest` docker image.\n- Add a new test `IcebergRestCatalogIngestionTest`"
    },
    {
      "commit": "98312214dcae724d1bb573eba4f8449d8c5f2fa2",
      "tree": "8f626bb5d309dc640a3c5ee714bbcaa35eb9fa47",
      "parents": [
        "c84f097e8719e272368ff9df71d0b73c7dd4eaa3"
      ],
      "author": {
        "name": "Clint Wylie",
        "email": "cwylie@apache.org",
        "time": "Sun Mar 15 20:40:57 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Mon Mar 16 09:10:57 2026 +0530"
      },
      "message": "Migrate oss integration test to embedded test framework (#19159)\n\nThese tests are run against an actual Alibaba OSS cloud storage against a mock or a testcontainer.\nAs such, they are disabled by default and can be run only when the required credentials are provided."
    },
    {
      "commit": "c84f097e8719e272368ff9df71d0b73c7dd4eaa3",
      "tree": "bd288e4e8f32208dbbdbdfa94dc2ee7eeac20505",
      "parents": [
        "a21bf3e2f22f6575d2b1826ec752f63880cd1b82"
      ],
      "author": {
        "name": "Abhishek Radhakrishnan",
        "email": "abhishek.rb19@gmail.com",
        "time": "Sun Mar 15 13:01:40 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Sun Mar 15 13:01:40 2026 -0700"
      },
      "message": "Remove `druid-operator` tree from this repo in favor of `apache/druid-operator` repo (#19156)"
    },
    {
      "commit": "a21bf3e2f22f6575d2b1826ec752f63880cd1b82",
      "tree": "50c59df59caf3d72728bd626983e3d29f60db473",
      "parents": [
        "9c9213e27b68387e88a905cc2cfcf5fdf4e90ea5"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Sun Mar 15 12:33:41 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Sun Mar 15 12:33:41 2026 -0700"
      },
      "message": "Fix flipped logic in KinesisFaultToleranceTest. (#19153)\n\nThe \"publishToSingleShard\" boolean was being used backwards: when\ntrue we would publish to all shards, when false we\u0027d publish only\nto shard \"0\". Fixing this will possibly help with flakiness for this\ntest."
    },
    {
      "commit": "9c9213e27b68387e88a905cc2cfcf5fdf4e90ea5",
      "tree": "05294006f96f148ff1d8714f307dddada103388a",
      "parents": [
        "6dd5459b06861abd03ce2103a681527e860f5219"
      ],
      "author": {
        "name": "Sasha Syrotenko",
        "email": "alexander.syrotenko@imply.io",
        "time": "Sun Mar 15 08:36:40 2026 +0200"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Sun Mar 15 12:06:40 2026 +0530"
      },
      "message": "Relax condition in test_autoScaler_scalesUpAndDown_withSlowPublish (#19155)"
    },
    {
      "commit": "6dd5459b06861abd03ce2103a681527e860f5219",
      "tree": "c98886be9ec7e2f6aec74911510349cec60bd5e3",
      "parents": [
        "947ddd5e29761c6ac89d0cf1ed4724980a291f72"
      ],
      "author": {
        "name": "Lucas Capistrant",
        "email": "capistrant@users.noreply.github.com",
        "time": "Fri Mar 13 12:58:57 2026 -0500"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 12:58:57 2026 -0500"
      },
      "message": "K8s extensions junit5 migration (#19154)\n\n* Migrate kubernetes-extensions to junit5\n\n* Convert kubernetes-overlord-extensions to junit5\n\n* get rid of imports of static assertions"
    },
    {
      "commit": "947ddd5e29761c6ac89d0cf1ed4724980a291f72",
      "tree": "0571ef237bc5d52850fa7b2192f209bf0591a5b0",
      "parents": [
        "0aedeb9cd4630ef76d9be26439a902b5bf1502d6"
      ],
      "author": {
        "name": "Jay Kanakiya",
        "email": "jaykanakiya42@gmail.com",
        "time": "Fri Mar 13 10:25:58 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 10:25:58 2026 -0700"
      },
      "message": "Fix typos in documentation (#19152)\n\nFixes #18643 and couple of other typos in documentation."
    },
    {
      "commit": "0aedeb9cd4630ef76d9be26439a902b5bf1502d6",
      "tree": "4e43fedc01e47c7bdc70d57f6dc8b5b0bcba7db3",
      "parents": [
        "f7f74f455e5db4c71b6b5a94c743a72bb27c3a4f"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 13 05:35:49 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 18:05:49 2026 +0530"
      },
      "message": "Don\u0027t include loadTime in LoadSegmentsResult when not loading. (#19149)\n\nThere is a check in QueryVirtualStorageTest.assertQueryMetrics that\nverifies query/load/batch/time is zero when query/load/count is zero,\nand it sometimes flakes. This patch should hopefully fix the flakes."
    },
    {
      "commit": "f7f74f455e5db4c71b6b5a94c743a72bb27c3a4f",
      "tree": "91bd5ee5d831ec9f682c10f29bf1b8bdcbf465a3",
      "parents": [
        "e22986ee98fdd5e852a0430d13982da7d12b44a8"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 13 03:46:54 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 16:16:54 2026 +0530"
      },
      "message": "Fix flaky KillSupervisorsCustomDutyTest. (#19150)\n\nIt is possible for the KillSupervisorsCustomDutyTest to flake\nbecause the final getSupervisorHistory call fails to return 404.\n\nIt is possible that this happens because the two entries are\ncleaned up in different duty runs, perhaps because the tombstone\nwas too new to clean up on the first run. To guard against this,\nwait for the metadata/kill/supervisor/count metric to sum to two."
    },
    {
      "commit": "e22986ee98fdd5e852a0430d13982da7d12b44a8",
      "tree": "e2dd658acf81625af7527fb6e5b583d696be138f",
      "parents": [
        "ee614dc3439835e05cd1670bd5f145e675b98f8a"
      ],
      "author": {
        "name": "Maytas Monsereenusorn",
        "email": "maytasm@apache.org",
        "time": "Fri Mar 13 01:16:07 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 01:16:07 2026 -0700"
      },
      "message": "Add spill file count limit for GroupBy query (#19141)\n\nGroupBy queries that group on high-cardinality dimensions can create a large number of spill files. This problem is more likely when queries contain many aggregators and/or aggregators with large memory footprints (e.g., DataSketch). This is because GroupBy can only hold a limited number of unique groupings in memory before flushing to disk — the exact limit depends on the size of each row, which is determined by the size of the aggregators. The issue arises when GroupBy attempts to merge all the spill files. Currently, GroupBy merges spill files by opening all of them simultaneously. Opening these files requires memory for objects such as MappingIterator, SmileParser, etc., which can cause historical nodes to OOM.\n\nThis PR fixes the issue by introducing a new property: druid.query.groupBy.maxSpillFileCount\nThe maximum number of spill files allowed per GroupBy query. When the limit is reached, the query fails with a ResourceLimitExceededException. This property can be used to prevent historical nodes from OOMing due to an excessive number of spill files being opened simultaneously during the merge phase. Defaults to Integer.MAX_VALUE (unlimited). Can also be set per query via the query context key maxSpillFileCount.\n\nNote that this new config, maxSpillFileCount, is complementary to the existing maxOnDiskStorage. maxOnDiskStorage limits total bytes across all spill files, but cannot prevent a large number of tiny files — a query can create hundreds of thousands of spill files while staying well under the byte limit. maxSpillFileCount fills this gap by limiting file count directly, which bounds the number of simultaneously open file handles during the merge phase. This situation arises when aggregators like ThetaSketch pre-allocate a large fixed buffer per row in memory (e.g. \n~131KB), causing the buffer to flush frequently with only a small number of rows; since each row corresponds to a unique grouping key in a high-cardinality dimension, each sketch has seen very few values at flush time and serializes to only a few bytes on disk using the sketch\u0027s compact format."
    },
    {
      "commit": "ee614dc3439835e05cd1670bd5f145e675b98f8a",
      "tree": "b0d0816ae47232b108c443f10c30cc53db103b43",
      "parents": [
        "30cdb06e54556d4ecbad0597405977697033aaee"
      ],
      "author": {
        "name": "Gian Merlino",
        "email": "gianmerlino@gmail.com",
        "time": "Fri Mar 13 01:05:21 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 01:05:21 2026 -0700"
      },
      "message": "Add /status/ready endpoint for service health. (#19148)\n\nCurrently the most natural endpoint to use for service health (e.g. if\nadding Druid services to a load balancer) is /status/health. However,\nthis does not play nicely with graceful shutdown mechanisms.\n\nWhen druid.server.http.unannouncePropagationDelay is used, there is a\ndelay between unannounce and server shutdown, which allows Druid\u0027s\ninternal service discovery to stop sending traffic to a service before\nit shuts down its server. However, /status/health continues to return OK\nuntil the server is shut down, so external load balancers cannot take\nadvantage of this.\n\nThis patch adds /status/ready, an endpoint that is tied to announcement.\nIt allows external load balancers to take advantage of this graceful\nshutdown mechanism."
    },
    {
      "commit": "30cdb06e54556d4ecbad0597405977697033aaee",
      "tree": "158cb58850ba382deff4bc3ad6a4dfb73aa02b5d",
      "parents": [
        "77580495d971886822a2f5f34f669740bbf7a1a2"
      ],
      "author": {
        "name": "Akshat Jain",
        "email": "akjn11@gmail.com",
        "time": "Fri Mar 13 12:23:20 2026 +0530"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Fri Mar 13 12:23:20 2026 +0530"
      },
      "message": "Fix authorization failure for table(append(...)) queries caused by quoted datasource names in resource check (#19147)"
    },
    {
      "commit": "77580495d971886822a2f5f34f669740bbf7a1a2",
      "tree": "2ae62275d228152b8b7745ec235a5c4c84ad7122",
      "parents": [
        "f3e7a7b210f1cbe837ebf802bceb84fc912f52fe"
      ],
      "author": {
        "name": "Ashwin Tumma",
        "email": "ashwin.tumma23@gmail.com",
        "time": "Thu Mar 12 08:54:55 2026 -0700"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 12 08:54:55 2026 -0700"
      },
      "message": "Fix CVE-2026-24308: Upgrade Apache ZooKeeper to 3.8.6 (#19135)\n\nUpgrades org.apache.zookeeper from 3.8.4 to 3.8.6 to remediate CVE-2026-24308.\n\n\n---------\n\nCo-authored-by: Ashwin Tumma \u003cashwin.tumma@salesforce.com\u003e"
    },
    {
      "commit": "f3e7a7b210f1cbe837ebf802bceb84fc912f52fe",
      "tree": "da809055377e93a94f8c878f4892f64fbaf4d372",
      "parents": [
        "e95e050442be340a0c2bc61ea09919ad1bd266e2"
      ],
      "author": {
        "name": "Kashif Faraz",
        "email": "kashif.faraz@gmail.com",
        "time": "Thu Mar 12 19:02:10 2026 +0530"
      },
      "committer": {
        "name": "GitHub",
        "email": "noreply@github.com",
        "time": "Thu Mar 12 08:32:10 2026 -0500"
      },
      "message": "Increase timeout in IngestionSmokeTest (#19142)"
    }
  ],
  "next": "e95e050442be340a0c2bc61ea09919ad1bd266e2"
}
