blob: c358f41be0f54fd55301b6e61d6cb051eb6972ce [file] [log] [blame]
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[595],{3905:(e,t,a)=>{a.d(t,{Zo:()=>u,kt:()=>c});var n=a(67294);function i(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t<arguments.length;t++){var a=null!=arguments[t]?arguments[t]:{};t%2?r(Object(a),!0).forEach((function(t){i(e,t,a[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(a)):r(Object(a)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(a,t))}))}return e}function l(e,t){if(null==e)return{};var a,n,i=function(e,t){if(null==e)return{};var a,n,i={},r=Object.keys(e);for(n=0;n<r.length;n++)a=r[n],t.indexOf(a)>=0||(i[a]=e[a]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n<r.length;n++)a=r[n],t.indexOf(a)>=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(i[a]=e[a])}return i}var p=n.createContext({}),s=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},d="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,i=e.mdxType,r=e.originalType,p=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),d=s(a),h=i,c=d["".concat(p,".").concat(h)]||d[h]||m[h]||r;return a?n.createElement(c,o(o({ref:t},u),{},{components:a})):n.createElement(c,o({ref:t},u))}));function c(e,t){var a=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=a.length,o=new Array(r);o[0]=h;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[d]="string"==typeof e?e:i,o[1]=l;for(var s=2;s<r;s++)o[s]=a[s];return n.createElement.apply(null,o)}return n.createElement.apply(null,a)}h.displayName="MDXCreateElement"},33946:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>p,default:()=>c,frontMatter:()=>l,metadata:()=>s,toc:()=>d});var n=a(87462),i=a(63366),r=(a(67294),a(3905)),o=["components"],l={id:"release-notes",title:"Release notes"},p=void 0,s={unversionedId:"release-info/release-notes",id:"release-info/release-notes",title:"Release notes",description:"\x3c!--",source:"@site/docs/latest/release-info/release-notes.md",sourceDirName:"release-info",slug:"/release-info/release-notes",permalink:"/docs/latest/release-info/release-notes",draft:!1,tags:[],version:"current",frontMatter:{id:"release-notes",title:"Release notes"},sidebar:"docs",previous:{title:"Experimental features",permalink:"/docs/latest/development/experimental"},next:{title:"Upgrade notes",permalink:"/docs/latest/release-info/upgrade-notes"}},u={},d=[{value:"Upcoming removals",id:"upcoming-removals",level:2},{value:"Important features, changes, and deprecations",id:"important-features-changes-and-deprecations",level:2},{value:"Concurrent append and replace improvements",id:"concurrent-append-and-replace-improvements",level:3},{value:"Grouping on complex columns",id:"grouping-on-complex-columns",level:3},{value:"Removed ZooKeeper-based segment loading",id:"removed-zookeeper-based-segment-loading",level:4},{value:"Improved groupBy queries",id:"improved-groupby-queries",level:3},{value:"Improved AND filter performance",id:"improved-and-filter-performance",level:3},{value:"Centralized datasource schema (alpha)",id:"centralized-datasource-schema-alpha",level:3},{value:"MSQ support for window functions",id:"msq-support-for-window-functions",level:3},{value:"MSQ support for Google Cloud Storage",id:"msq-support-for-google-cloud-storage",level:3},{value:"RabbitMQ extension",id:"rabbitmq-extension",level:3},{value:"Functional area and related changes",id:"functional-area-and-related-changes",level:2},{value:"Web console",id:"web-console",level:3},{value:"Improved the Supervisors view",id:"improved-the-supervisors-view",level:4},{value:"Search in tables and columns",id:"search-in-tables-and-columns",level:4},{value:"Kafka input format",id:"kafka-input-format",level:4},{value:"Improved handling of lookups during sampling",id:"improved-handling-of-lookups-during-sampling",level:4},{value:"Other web console improvements",id:"other-web-console-improvements",level:4},{value:"General ingestion",id:"general-ingestion",level:3},{value:"Improved Azure input source",id:"improved-azure-input-source",level:4},{value:"Added a new config to <code>AzureAccountConfig</code>",id:"added-a-new-config-to-azureaccountconfig",level:4},{value:"Data management API improvements",id:"data-management-api-improvements",level:4},{value:"Nested columns performance improvement",id:"nested-columns-performance-improvement",level:4},{value:"Improved task context reporting",id:"improved-task-context-reporting",level:4},{value:"Other ingestion improvements",id:"other-ingestion-improvements",level:4},{value:"SQL-based ingestion",id:"sql-based-ingestion",level:3},{value:"Manifest files for MSQ task engine exports",id:"manifest-files-for-msq-task-engine-exports",level:4},{value:"<code>SortMerge</code> join support",id:"sortmerge-join-support",level:4},{value:"State of compaction context parameter",id:"state-of-compaction-context-parameter",level:4},{value:"Selective loading of lookups",id:"selective-loading-of-lookups",level:4},{value:"MSQ task report improvements",id:"msq-task-report-improvements",level:4},{value:"Other SQL-based ingestion improvements",id:"other-sql-based-ingestion-improvements",level:4},{value:"Streaming ingestion",id:"streaming-ingestion",level:3},{value:"Streaming completion reports",id:"streaming-completion-reports",level:4},{value:"Improved memory management for Kinesis",id:"improved-memory-management-for-kinesis",level:4},{value:"Improved autoscaling for Kinesis streams",id:"improved-autoscaling-for-kinesis-streams",level:4},{value:"Parallelized incremental segment creation",id:"parallelized-incremental-segment-creation",level:4},{value:"Kafka steaming supervisor topic improvement",id:"kafka-steaming-supervisor-topic-improvement",level:4},{value:"Querying",id:"querying",level:3},{value:"Dynamic table append",id:"dynamic-table-append",level:4},{value:"Added SCALAR_IN_ARRAY function",id:"added-scalar_in_array-function",level:4},{value:"Improved PARTITIONED BY",id:"improved-partitioned-by",level:4},{value:"Improved catalog tables",id:"improved-catalog-tables",level:4},{value:"Double and null values in SQL type ARRAY",id:"double-and-null-values-in-sql-type-array",level:4},{value:"<code>TypedInFilter</code> filter",id:"typedinfilter-filter",level:4},{value:"Heap dictionaries clear out",id:"heap-dictionaries-clear-out",level:4},{value:"Other querying improvements",id:"other-querying-improvements",level:4},{value:"Cluster management",id:"cluster-management",level:3},{value:"Improved retrieving active task status",id:"improved-retrieving-active-task-status",level:4},{value:"Other cluster management improvements",id:"other-cluster-management-improvements",level:4},{value:"Data management",id:"data-management",level:3},{value:"Changes to Coordinator default values",id:"changes-to-coordinator-default-values",level:4},{value:"Compaction completion reports",id:"compaction-completion-reports",level:4},{value:"<code>GoogleTaskLogs</code> upload buffer size",id:"googletasklogs-upload-buffer-size",level:4},{value:"Other data management improvements",id:"other-data-management-improvements",level:4},{value:"Metrics and monitoring",id:"metrics-and-monitoring",level:3},{value:"New unused segment metric",id:"new-unused-segment-metric",level:4},{value:"Kafka emitter improvements",id:"kafka-emitter-improvements",level:4},{value:"Prometheus emitter improvements",id:"prometheus-emitter-improvements",level:4},{value:"StatsD emitter improvements",id:"statsd-emitter-improvements",level:4},{value:"Improved <code>segment/unavailable/count</code> metric",id:"improved-segmentunavailablecount-metric",level:4},{value:"Other metrics and monitoring improvements",id:"other-metrics-and-monitoring-improvements",level:4},{value:"Extensions",id:"extensions",level:3},{value:"Microsoft Azure improvements",id:"microsoft-azure-improvements",level:4},{value:"Kubernetes improvements",id:"kubernetes-improvements",level:4},{value:"Delta Lake improvements",id:"delta-lake-improvements",level:4},{value:"Improve performance of LDAP credentials validator",id:"improve-performance-of-ldap-credentials-validator",level:4},{value:"Upgrade notes and incompatible changes",id:"upgrade-notes-and-incompatible-changes",level:2},{value:"Upgrade notes",id:"upgrade-notes",level:3},{value:"Append JsonPath function",id:"append-jsonpath-function",level:4},{value:"Kinesis ingestion tuning",id:"kinesis-ingestion-tuning",level:4},{value:"Improved Supervisor rolling restarts",id:"improved-supervisor-rolling-restarts",level:4},{value:"Changes to Coordinator default values",id:"changes-to-coordinator-default-values-1",level:4},{value:"<code>GoogleTaskLogs</code> upload buffer size",id:"googletasklogs-upload-buffer-size-1",level:4},{value:"Incompatible changes",id:"incompatible-changes",level:3},{value:"Changes to <code>targetDataSource</code> in EXPLAIN queries",id:"changes-to-targetdatasource-in-explain-queries",level:4},{value:"Removed ZooKeeper-based segment loading",id:"removed-zookeeper-based-segment-loading-1",level:4},{value:"Removed Coordinator configs",id:"removed-coordinator-configs",level:4},{value:"Changed <code>useMaxMemoryEstimates</code> for Hadoop jobs",id:"changed-usemaxmemoryestimates-for-hadoop-jobs",level:4},{value:"Developer notes",id:"developer-notes",level:3},{value:"Dependency updates",id:"dependency-updates",level:4}],m={toc:d},h="wrapper";function c(e){var t=e.components,l=(0,i.Z)(e,o);return(0,r.kt)(h,(0,n.Z)({},m,l,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"Apache Druid 30.0.0 contains over 407 new features, bug fixes, performance enhancements, documentation improvements, and additional test coverage from 50 contributors."),(0,r.kt)("p",null,"See the ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/issues?q=is%3Aclosed+milestone%3A30.0.0+sort%3Aupdated-desc+"},"complete set of changes")," for additional details, including bug fixes."),(0,r.kt)("p",null,"Review the ",(0,r.kt)("a",{parentName:"p",href:"#upgrade-notes"},"upgrade notes")," and ",(0,r.kt)("a",{parentName:"p",href:"#incompatible-changes"},"incompatible changes")," before you upgrade to Druid 30.0.0.\nIf you are upgrading across multiple versions, see the ",(0,r.kt)("a",{parentName:"p",href:"/docs/latest/release-info/upgrade-notes"},"Upgrade notes")," page, which lists upgrade notes for the most recent Druid versions."),(0,r.kt)("h2",{id:"upcoming-removals"},"Upcoming removals"),(0,r.kt)("p",null,"As part of the continued improvements to Druid, we are deprecating certain features and behaviors in favor of newer iterations that offer more robust features and are more aligned with standard ANSI SQL. Many of these new features have been the default for new deployments for several releases."),(0,r.kt)("p",null,"The following features are deprecated, and we plan to remove support as early as Druid 36.0.0:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Multi-value dimensions"),": Druid now offers support for ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/sql-data-types#arrays"},"array types"),". "),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Non-SQL compliant null handling"),": By default, Druid now differentiates between an empty string and a record with no data as well as between an empty numerical record and ",(0,r.kt)("inlineCode",{parentName:"li"},"0"),". For more information, see ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/sql-data-types#null-values"},"NULL values"),". For a tutorial on the SQL-compliant logic, see the ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/tutorials/tutorial-sql-null"},"Null handling tutorial"),"."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Non-strict Boolean handling"),": Druid now strictly uses ",(0,r.kt)("inlineCode",{parentName:"li"},"1")," (true) or ",(0,r.kt)("inlineCode",{parentName:"li"},"0")," (false). Previously, true and false could be represented either as ",(0,r.kt)("inlineCode",{parentName:"li"},"true")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"false")," or as ",(0,r.kt)("inlineCode",{parentName:"li"},"1")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"0"),", respectively. In addition, Druid now returns a null value for Boolean comparisons like ",(0,r.kt)("inlineCode",{parentName:"li"},"True && NULL"),". For more information, see ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/sql-data-types#boolean-logic"},"Boolean logic"),". For examples of filters that use the SQL-compliant logic, see ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/filters"},"Query filters"),"."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Two-value logic"),": By default, Druid now uses three-valued logic for both ingestion and querying. This primarily affects filters using logical NOT operations on columns with NULL values. For more information, see ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/sql-data-types#boolean-logic"},"Boolean logic"),". For examples of filters that use the SQL-compliant logic, see ",(0,r.kt)("a",{parentName:"li",href:"/docs/latest/querying/filters"},"Query filters"),".")),(0,r.kt)("h2",{id:"important-features-changes-and-deprecations"},"Important features, changes, and deprecations"),(0,r.kt)("p",null,"This section contains important information about new and existing features."),(0,r.kt)("h3",{id:"concurrent-append-and-replace-improvements"},"Concurrent append and replace improvements"),(0,r.kt)("p",null,"Streaming ingestion supervisors now support concurrent append, that is streaming tasks can run concurrently with a replace task (compaction or re-indexing) if it also happens to be using concurrent locks. Set the context parameter ",(0,r.kt)("inlineCode",{parentName:"p"},"useConcurrentLocks")," to true to enable concurrent append."),(0,r.kt)("p",null,"Once you update the supervisor to have ",(0,r.kt)("inlineCode",{parentName:"p"},'"useConcurrentLocks": true'),", the transition to concurrent append happens seamlessly without causing any ingestion lag or task failures."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16369"},"#16369")),(0,r.kt)("p",null,"Druid now performs active cleanup of stale pending segments by tracking the set of tasks using such pending segments.\nThis allows concurrent append and replace to upgrade only a minimal set of pending segments and thus improve performance and eliminate errors.\nAdditionally, it helps in reducing load on the metadata store."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16144"},"#16144")),(0,r.kt)("h3",{id:"grouping-on-complex-columns"},"Grouping on complex columns"),(0,r.kt)("p",null,"Druid now supports grouping on complex columns and nested arrays.\nThis means that both native queries and the MSQ task engine can group on complex columns and nested arrays while returning results."),(0,r.kt)("p",null,"Additionally, the MSQ task engine can roll up and sort on the supported complex columns, such as JSON columns, during ingestion."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16068"},"#16068"),"\n",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16322"},"#16322")),(0,r.kt)("h4",{id:"removed-zookeeper-based-segment-loading"},"Removed ZooKeeper-based segment loading"),(0,r.kt)("p",null,"ZooKeeper-based segment loading is being removed due to known issues.\nIt has been deprecated for several releases.\nRecent improvements to the Druid Coordinator have significantly enhanced performance with HTTP-based segment loading."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15705"},"#15705")),(0,r.kt)("h3",{id:"improved-groupby-queries"},"Improved groupBy queries"),(0,r.kt)("p",null,"Before Druid pushes realtime segments to deep storage, the segments consist of spill files.\nSegment metrics such as ",(0,r.kt)("inlineCode",{parentName:"p"},"query/segment/time")," now report on each spill file for a realtime segment, rather than for the entire segment.\nThis change eliminates the need to materialize results on the heap, which improves the performance of groupBy queries."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15757"},"#15757")),(0,r.kt)("h3",{id:"improved-and-filter-performance"},"Improved AND filter performance"),(0,r.kt)("p",null,"Druid query processing now adaptively determines when children of AND filters should compute indexes and when to simply match rows during the scan based on selectivity of other filters.\nKnown as filter partitioning, it can result in dramatic performance increases, depending on the order of filters in the query."),(0,r.kt)("p",null,"For example, take a query like ",(0,r.kt)("inlineCode",{parentName:"p"},"SELECT SUM(longColumn) FROM druid.table WHERE stringColumn1 = '1000' AND stringColumn2 LIKE '%1%'"),". Previously, Druid used indexes when processing filters if they are available.\nThat's not always ideal; imagine if ",(0,r.kt)("inlineCode",{parentName:"p"},"stringColumn1 = '1000'")," matches 100 rows. With indexes, we have to find every value of ",(0,r.kt)("inlineCode",{parentName:"p"},"stringColumn2 LIKE '%1%'")," that is true to compute the indexes for the filter. If ",(0,r.kt)("inlineCode",{parentName:"p"},"stringColumn2")," has more than 100 values, it ends up being worse than simply checking for a match in those 100 remaining rows."),(0,r.kt)("p",null,"With the new logic, Druid now checks the selectivity of indexes as it processes each clause of the AND filter.\nIf it determines it would take more work to compute the index than to match the remaining rows, Druid skips computing the index."),(0,r.kt)("p",null,"The order you write filters in a WHERE clause of a query can improve the performance of your query.\nMore improvements are coming, but you can try out the existing improvements by reordering a query.\nPut indexes that are less intensive to compute such as ",(0,r.kt)("inlineCode",{parentName:"p"},"IS NULL"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"="),", and comparisons (",(0,r.kt)("inlineCode",{parentName:"p"},">"),", ",(0,r.kt)("inlineCode",{parentName:"p"},">=,")," ",(0,r.kt)("inlineCode",{parentName:"p"},"<"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"<="),") near the start of AND filters so that Druid more efficiently processes your queries.\nNot ordering your filters in this way won\u2019t degrade performance from previous releases since the fallback behavior is what Druid did previously."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15838"},"#15838")),(0,r.kt)("h3",{id:"centralized-datasource-schema-alpha"},"Centralized datasource schema (alpha)"),(0,r.kt)("p",null,"You can now configure Druid to manage datasource schema centrally on the Coordinator.\nPreviously, Brokers needed to query data nodes and tasks for segment schemas.\nCentralizing datasource schemas can improve startup time for Brokers and the efficiency of your deployment."),(0,r.kt)("p",null,"To enable this feature, set the following configs:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"In your common runtime properties, set ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.centralizedDatasourceSchema.enabled")," to true."),(0,r.kt)("li",{parentName:"ul"},"If you are using MiddleManagers, you also need to set ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.indexer.fork.property.druid.centralizedDatasourceSchema.enabled")," to true in your MiddleManager runtime properties.")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15817"},"#15817")),(0,r.kt)("h3",{id:"msq-support-for-window-functions"},"MSQ support for window functions"),(0,r.kt)("p",null,"You can now run window functions in the MSQ task engine using the context flag ",(0,r.kt)("inlineCode",{parentName:"p"},"enableWindowing:true"),"."),(0,r.kt)("p",null,"In the native engine, you must use a group by clause to enable window functions. This requirement is removed in the MSQ task engine."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15470"},"#15470"),"\n",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16229"},"#16229")),(0,r.kt)("h3",{id:"msq-support-for-google-cloud-storage"},"MSQ support for Google Cloud Storage"),(0,r.kt)("p",null,"You can now export MSQ results to a Google Cloud Storage (GCS) path by passing the function ",(0,r.kt)("inlineCode",{parentName:"p"},"google()")," as an argument to the ",(0,r.kt)("inlineCode",{parentName:"p"},"EXTERN")," function."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16051"},"#16051")),(0,r.kt)("h3",{id:"rabbitmq-extension"},"RabbitMQ extension"),(0,r.kt)("p",null,"A new RabbitMQ extension is available as a community contribution.\nThe RabbitMQ extension (",(0,r.kt)("inlineCode",{parentName:"p"},"druid-rabbit-indexing-service"),") lets you manage the creation and lifetime of rabbit indexing tasks. These indexing tasks read events from ",(0,r.kt)("a",{parentName:"p",href:"https://www.rabbitmq.com"},"RabbitMQ")," through ",(0,r.kt)("a",{parentName:"p",href:"https://www.rabbitmq.com/docs/streams#super-streams"},"super streams"),"."),(0,r.kt)("p",null,"As super streams allow exactly once delivery with full support for partitioning, they are compatible with Druid's modern ingestion algorithm, without the downsides of the prior RabbitMQ firehose."),(0,r.kt)("p",null,"Note that this uses the RabbitMQ streams feature and not a conventional exchange. You need to make sure that your messages are in a super stream before consumption. For more information, see ",(0,r.kt)("a",{parentName:"p",href:"https://www.rabbitmq.com/docs"},"RabbitMQ documentation"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/14137"},"#14137")),(0,r.kt)("h2",{id:"functional-area-and-related-changes"},"Functional area and related changes"),(0,r.kt)("p",null,"This section contains detailed release notes separated by areas."),(0,r.kt)("h3",{id:"web-console"},"Web console"),(0,r.kt)("h4",{id:"improved-the-supervisors-view"},"Improved the Supervisors view"),(0,r.kt)("p",null,"You can now use the ",(0,r.kt)("strong",{parentName:"p"},"Supervisors")," view to dynamically query supervisors and display additional information on newly added columns."),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Surface more information on the supervisors view",src:a(12501).Z,width:"1715",height:"432"})),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16318"},"#16318")),(0,r.kt)("h4",{id:"search-in-tables-and-columns"},"Search in tables and columns"),(0,r.kt)("p",null,"You can now use the ",(0,r.kt)("strong",{parentName:"p"},"Query")," view to search in tables and columns."),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Use the sidebar to search in tables and columns in Query view",src:a(97449).Z,width:"824",height:"1164"})),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15990"},"#15990")),(0,r.kt)("h4",{id:"kafka-input-format"},"Kafka input format"),(0,r.kt)("p",null,"Improved how the web console determines the input format for a Kafka source.\nInstead of defaulting to the Kafka input format for a Kafka source, the web console now only picks the Kafka input format if it detects any of the following in the Kafka sample: a key, headers, or more than one topic."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16180"},"#16180")),(0,r.kt)("h4",{id:"improved-handling-of-lookups-during-sampling"},"Improved handling of lookups during sampling"),(0,r.kt)("p",null,"Rather than sending a transform expression containing lookups to the sampler, Druid now substitutes the transform expression with a placeholder.\nThis prevents the expression from blocking the flow."),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Change the transform expression to a placeholder",src:a(93860).Z,width:"1820",height:"1101"})),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16234"},"#16234")),(0,r.kt)("h4",{id:"other-web-console-improvements"},"Other web console improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Added the fields ",(0,r.kt)("strong",{parentName:"p"},"Avro bytes decoder")," and ",(0,r.kt)("strong",{parentName:"p"},"Proto bytes decoder")," for their input formats ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15950"},"#15950"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Fixed an issue with the ",(0,r.kt)("a",{parentName:"p",href:"https://druid.apache.org/docs/latest/operations/web-console#tasks"},"Tasks")," view returning incorrect values for ",(0,r.kt)("strong",{parentName:"p"},"Created time")," and ",(0,r.kt)("strong",{parentName:"p"},"Duration")," fields after the Overlord restarts ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16228"},"#16228"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Fixed the Azure icon not rendering in the web console ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16173"},"#16173"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Fixed the supervisor offset reset dialog in the web console ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16298"},"#16298"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Improved the user experience when the web console is operating in manual capabilities mode ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16191"},"#16191"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"Improved the query timer as follows:"),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"Timer isn't shown if an error happens"),(0,r.kt)("li",{parentName:"ul"},"Timer resets if changing tabs while query is running"),(0,r.kt)("li",{parentName:"ul"},"Error state is lost if tab is switched twice")),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16235"},"#16235"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"The web console now suggests the ",(0,r.kt)("inlineCode",{parentName:"p"},"azureStorage")," input type instead of the ",(0,r.kt)("inlineCode",{parentName:"p"},"azure")," storage type ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15820"},"#15820"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"The download query detail archive option is now more resilient when the detail archive is incomplete ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16071"},"#16071"))),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("p",{parentName:"li"},"You can now set ",(0,r.kt)("inlineCode",{parentName:"p"},"maxCompactionTaskSlots")," to zero to stop compaction tasks ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15877"},"#15877")))),(0,r.kt)("h3",{id:"general-ingestion"},"General ingestion"),(0,r.kt)("h4",{id:"improved-azure-input-source"},"Improved Azure input source"),(0,r.kt)("p",null,"You can now ingest data from multiple storage accounts using the new ",(0,r.kt)("inlineCode",{parentName:"p"},"azureStorage")," input source schema. For example:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'...\n "ioConfig": {\n "type": "index_parallel",\n "inputSource": {\n "type": "azureStorage",\n "objectGlob": "**.json",\n "uris": ["azureStorage://storageAccount/container/prefix1/file.json", "azureStorage://storageAccount/container/prefix2/file2.json"]\n },\n "inputFormat": {\n "type": "json"\n },\n ...\n },\n...\n')),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15630"},"#15630")),(0,r.kt)("h4",{id:"added-a-new-config-to-azureaccountconfig"},"Added a new config to ",(0,r.kt)("inlineCode",{parentName:"h4"},"AzureAccountConfig")),(0,r.kt)("p",null,"The new config ",(0,r.kt)("inlineCode",{parentName:"p"},"storageAccountEndpointSuffix")," lets you configure the endpoint suffix so that you can override the default and connect to other endpoints, such as Azure Government."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16016"},"#16016")),(0,r.kt)("h4",{id:"data-management-api-improvements"},"Data management API improvements"),(0,r.kt)("p",null,"Improved the ",(0,r.kt)("a",{parentName:"p",href:"https://druid.apache.org/docs/latest/api-reference/data-management-api"},"Data management API")," as follows:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Fixed a bug in the ",(0,r.kt)("inlineCode",{parentName:"li"},"markUsed")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"markUnused")," APIs where an empty set of segment IDs would be inconsistently treated as null or non-null in different scenarios ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16145"},"#16145")),(0,r.kt)("li",{parentName:"ul"},"Improved the ",(0,r.kt)("inlineCode",{parentName:"li"},"markUnused")," API endpoint to handle an empty list of segment versions ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16198"},"#16198")),(0,r.kt)("li",{parentName:"ul"},"The ",(0,r.kt)("inlineCode",{parentName:"li"},"segmentIds")," filter in the Data management API payload is now parameterized in the database query ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16174"},"#16174")),(0,r.kt)("li",{parentName:"ul"},"You can now mark segments as used or unused within the specified interval using an optional list of versions.\nFor example: ",(0,r.kt)("inlineCode",{parentName:"li"},"(interval, [versions])"),". When ",(0,r.kt)("inlineCode",{parentName:"li"},"versions")," is unspecified, all versions of segments in the ",(0,r.kt)("inlineCode",{parentName:"li"},"interval")," are marked as used or unused, preserving the old behavior ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16141"},"#16141"))),(0,r.kt)("h4",{id:"nested-columns-performance-improvement"},"Nested columns performance improvement"),(0,r.kt)("p",null,"Nested column serialization now releases nested field compression buffers as soon as the nested field serialization is complete, which requires significantly less direct memory during segment serialization when many nested fields are present."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16076"},"#16076")),(0,r.kt)("h4",{id:"improved-task-context-reporting"},"Improved task context reporting"),(0,r.kt)("p",null,"Added a new field ",(0,r.kt)("inlineCode",{parentName:"p"},"taskContext")," in the task reports of non-MSQ tasks. The change is backward compatible. The payload of this field contains the entire context used by the task during its runtime."),(0,r.kt)("p",null,"Added a new experimental interface ",(0,r.kt)("inlineCode",{parentName:"p"},"TaskContextEnricher")," to enrich context with use case specific logic."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16041"},"#16041")),(0,r.kt)("h4",{id:"other-ingestion-improvements"},"Other ingestion improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Added indexer level task metrics to provide more visibility in task distribution ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15991"},"#15991")),(0,r.kt)("li",{parentName:"ul"},"Added more logging detail for S3 ",(0,r.kt)("inlineCode",{parentName:"li"},"RetryableS3OutputStream"),"\u2014","this can help to determine whether to adjust chunk size ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16117"},"#16117")),(0,r.kt)("li",{parentName:"ul"},"Added error code to failure type ",(0,r.kt)("inlineCode",{parentName:"li"},"InternalServerError")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16186"},"#16186")),(0,r.kt)("li",{parentName:"ul"},"Added a new index for pending segments table for datasource and ",(0,r.kt)("inlineCode",{parentName:"li"},"task_allocator_id")," columns ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16355"},"#16355")),(0,r.kt)("li",{parentName:"ul"},"Fixed a bug in the ",(0,r.kt)("inlineCode",{parentName:"li"},"MarkOvershadowedSegmentsAsUnused")," Coordinator duty to also consider segments that are overshadowed by a segment that requires zero replicas ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16181"},"#16181")),(0,r.kt)("li",{parentName:"ul"},"Fixed a bug where ",(0,r.kt)("inlineCode",{parentName:"li"},"numSegmentsKilled")," is reported incorrectly ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16103"},"#16103")),(0,r.kt)("li",{parentName:"ul"},"Fixed a bug where completion task reports are not being generated on ",(0,r.kt)("inlineCode",{parentName:"li"},"index_parallel")," tasks ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16042"},"#16042")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue where concurrent replace skipped intervals locked by append locks during compaction ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16316"},"#16316")),(0,r.kt)("li",{parentName:"ul"},"Improved error messages when supervisor's checkpoint state is invalid ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16208"},"#16208")),(0,r.kt)("li",{parentName:"ul"},"Improved serialization of ",(0,r.kt)("inlineCode",{parentName:"li"},"TaskReportMap")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16217"},"#16217")),(0,r.kt)("li",{parentName:"ul"},"Improved compaction segment read and published fields to include sequential compaction tasks ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16171"},"#16171")),(0,r.kt)("li",{parentName:"ul"},"Improved kill task so that it now accepts an optional list of unused segment versions to delete ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15994"},"#15994")),(0,r.kt)("li",{parentName:"ul"},"Improved logging when ingestion tasks try to get lookups from the Coordinator at startup ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16287"},"#16287")),(0,r.kt)("li",{parentName:"ul"},"Improved ingestion performance by parsing an input stream directly instead of converting it to a string and parsing the string as JSON ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15693"},"#15693")),(0,r.kt)("li",{parentName:"ul"},"Improved the creation of input row filter predicate in various batch tasks ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16196"},"#16196")),(0,r.kt)("li",{parentName:"ul"},"Improved how Druid fetches tasks from the Overlord to redact credentials ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16182"},"#16182")),(0,r.kt)("li",{parentName:"ul"},"Optimized ",(0,r.kt)("inlineCode",{parentName:"li"},"isOvershadowed")," when there is a unique minor version for an interval ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15952"},"#15952")),(0,r.kt)("li",{parentName:"ul"},"Removed ",(0,r.kt)("inlineCode",{parentName:"li"},"EntryExistsException")," thrown when trying to insert a duplicate task in the metadata store","\u2014","Druid now throws a ",(0,r.kt)("inlineCode",{parentName:"li"},"DruidException")," with error code ",(0,r.kt)("inlineCode",{parentName:"li"},"entryAlreadyExists")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/14448"},"#14448")),(0,r.kt)("li",{parentName:"ul"},"The task status output for a failed task now includes the exception message ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16286"},"#16286"))),(0,r.kt)("h3",{id:"sql-based-ingestion"},"SQL-based ingestion"),(0,r.kt)("h4",{id:"manifest-files-for-msq-task-engine-exports"},"Manifest files for MSQ task engine exports"),(0,r.kt)("p",null,"Export queries that use the MSQ task engine now also create a manifest file at the destination, which lists the files created by the query."),(0,r.kt)("p",null,"During a rolling update, older versions of workers don't return a list of exported files, and older Controllers don't create a manifest file.\nTherefore, export queries ran during this time might have incomplete manifests."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15953"},"#15953")),(0,r.kt)("h4",{id:"sortmerge-join-support"},(0,r.kt)("inlineCode",{parentName:"h4"},"SortMerge")," join support"),(0,r.kt)("p",null,"Druid now supports ",(0,r.kt)("inlineCode",{parentName:"p"},"SortMerge")," join for ",(0,r.kt)("inlineCode",{parentName:"p"},"IS NOT DISTINCT FROM")," operations."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16003"},"#16003")),(0,r.kt)("h4",{id:"state-of-compaction-context-parameter"},"State of compaction context parameter"),(0,r.kt)("p",null,"Added a new context parameter ",(0,r.kt)("inlineCode",{parentName:"p"},"storeCompactionState"),".\nWhen set to ",(0,r.kt)("inlineCode",{parentName:"p"},"true"),", Druid records the state of compaction for each segment in the ",(0,r.kt)("inlineCode",{parentName:"p"},"lastCompactionState")," segment field."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15965"},"#15965")),(0,r.kt)("h4",{id:"selective-loading-of-lookups"},"Selective loading of lookups"),(0,r.kt)("p",null,"We have built the foundation of selective lookup loading. As part of this improvement, ",(0,r.kt)("inlineCode",{parentName:"p"},"KillUnusedSegmentsTask")," no longer loads lookups."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16328"},"#16328")),(0,r.kt)("h4",{id:"msq-task-report-improvements"},"MSQ task report improvements"),(0,r.kt)("p",null,"Improved the task report for the MSQ task engine as follows:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"A new field in the MSQ task report captures the milliseconds elapsed between when the worker task was first requested and when it fully started running. Actual work time can be calculated using ",(0,r.kt)("inlineCode",{parentName:"li"},"actualWorkTimeMS = durationMs - pendingMs")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15966"},"#15966")),(0,r.kt)("li",{parentName:"ul"},"A new field ",(0,r.kt)("inlineCode",{parentName:"li"},"segmentReport")," logs the type of the segment created and the reason behind the selection ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16175"},"#16175"))),(0,r.kt)("h4",{id:"other-sql-based-ingestion-improvements"},"Other SQL-based ingestion improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Changed the controller checker for the MSQ task engine to check for closed only ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16161"},"#16161")),(0,r.kt)("li",{parentName:"ul"},"Fixed an incorrect check while generating MSQ task engine error report ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16273"},"#16273")),(0,r.kt)("li",{parentName:"ul"},"Improved the message you get when the MSQ task engine falls back to a broadcast join from a sort-merge ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16002"},"#16002")),(0,r.kt)("li",{parentName:"ul"},"Improved the speed of worker cancellation by bypassing unnecessary communication with the controller ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16158"},"#16158")),(0,r.kt)("li",{parentName:"ul"},"Improved the error message you get when there's an issue with your PARTITIONED BY clause ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15961"},"#15961")),(0,r.kt)("li",{parentName:"ul"},"Runtime exceptions generated while writing frames now include the name of the column where they occurred ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16130"},"#16130"))),(0,r.kt)("h3",{id:"streaming-ingestion"},"Streaming ingestion"),(0,r.kt)("h4",{id:"streaming-completion-reports"},"Streaming completion reports"),(0,r.kt)("p",null,"Streaming task completion reports now have an extra field ",(0,r.kt)("inlineCode",{parentName:"p"},"recordsProcessed"),", which lists all the partitions processed by that task and a count of records for each partition.\nUse this field to see the actual throughput of tasks and make decision as to whether you should vertically or horizontally scale your workers."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15930"},"#15930")),(0,r.kt)("h4",{id:"improved-memory-management-for-kinesis"},"Improved memory management for Kinesis"),(0,r.kt)("p",null,"Kinesis ingestion memory tuning config is now simpler:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"You no longer need to set the configs ",(0,r.kt)("inlineCode",{parentName:"li"},"recordsPerFetch")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"deaggregate"),"."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"fetchThreads")," can no longer exceed the budgeted amount of heap (100 MB or 5%)."),(0,r.kt)("li",{parentName:"ul"},"Use ",(0,r.kt)("inlineCode",{parentName:"li"},"recordBufferSizeBytes")," to set a byte-based limit rather than records-based limit for the Kinesis fetch threads and main ingestion threads. We recommend setting this to 100 MB or 10% of heap, whichever is smaller."),(0,r.kt)("li",{parentName:"ul"},"Use ",(0,r.kt)("inlineCode",{parentName:"li"},"maxBytesPerPoll")," to set a byte-based limit for how much data Druid polls from shared buffer at a time. Default is 1,000,000 bytes.")),(0,r.kt)("p",null,"As part of this change, the following properties have been deprecated:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"recordBufferSize"),", use ",(0,r.kt)("inlineCode",{parentName:"li"},"recordBufferSizeBytes")," instead"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"maxRecordsPerPoll"),", use ",(0,r.kt)("inlineCode",{parentName:"li"},"maxBytesPerPoll")," instead")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15360"},"#15360")),(0,r.kt)("h4",{id:"improved-autoscaling-for-kinesis-streams"},"Improved autoscaling for Kinesis streams"),(0,r.kt)("p",null,"The Kinesis autoscaler now considers max lag in minutes instead of total lag.\nTo maintain backwards compatibility, this change is opt-in for existing Kinesis connections.\nTo opt in, set ",(0,r.kt)("inlineCode",{parentName:"p"},"lagBased.lagAggregate")," in your supervisor spec to ",(0,r.kt)("inlineCode",{parentName:"p"},"MAX"),".\nNew connections use max lag by default."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16284"},"#16284"),"\n",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16314"},"#16314")),(0,r.kt)("h4",{id:"parallelized-incremental-segment-creation"},"Parallelized incremental segment creation"),(0,r.kt)("p",null,"You can now configure the number of threads used to create and persist incremental segments on the disk using the ",(0,r.kt)("inlineCode",{parentName:"p"},"numPersistThreads")," property.\nUse additional threads to parallelize the segment creation to prevent ingestion from stalling or pausing frequently as long as there are sufficient CPU resources available."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/13982/files"},"#13982")),(0,r.kt)("h4",{id:"kafka-steaming-supervisor-topic-improvement"},"Kafka steaming supervisor topic improvement"),(0,r.kt)("p",null,"Druid now properly handles previously found partition offsets.\nPrior to this change, updating a Kafka streaming supervisor topic from single to multi-topic (pattern), or vice versa, could cause old offsets to be ignored spuriously."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16190"},"#16190")),(0,r.kt)("h3",{id:"querying"},"Querying"),(0,r.kt)("h4",{id:"dynamic-table-append"},"Dynamic table append"),(0,r.kt)("p",null,"You can now use the ",(0,r.kt)("inlineCode",{parentName:"p"},"TABLE(APPEND(...))")," function to implicitly create unions based on table schemas."),(0,r.kt)("p",null,"For example, the following queries are equivalent:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sql"},"SELECT * FROM TABLE(APPEND('table1','table2','table3'))\n")),(0,r.kt)("p",null,"and"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sql"},"SELECT column1,NULL AS column2,NULL AS column3 FROM table1\nUNION ALL\nSELECT NULL AS column1,column2,NULL AS column3 FROM table2\nUNION ALL\nSELECT column1,column2,column3 FROM table3\n")),(0,r.kt)("p",null,"Note that if the same columns are defined with different input types, Druid uses the least restrictive column type."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15897"},"#15897")),(0,r.kt)("h4",{id:"added-scalar_in_array-function"},"Added SCALAR_IN_ARRAY function"),(0,r.kt)("p",null,"Added ",(0,r.kt)("inlineCode",{parentName:"p"},"SCALAR_IN_ARRAY")," function for checking if a scalar expression appears in an array:"),(0,r.kt)("p",null,(0,r.kt)("inlineCode",{parentName:"p"},"SCALAR_IN_ARRAY(expr, arr)")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16306"},"#16306")),(0,r.kt)("h4",{id:"improved-partitioned-by"},"Improved PARTITIONED BY"),(0,r.kt)("p",null,"If you use the MSQ task engine to run queries, you can now use the following strings in addition to the supported ISO 8601 periods:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"HOUR")," - Same as ",(0,r.kt)("inlineCode",{parentName:"li"},"'PT1H'")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"DAY")," - Same as ",(0,r.kt)("inlineCode",{parentName:"li"},"'P1D'")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"MONTH")," - Same as ",(0,r.kt)("inlineCode",{parentName:"li"},"'P1M'")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"YEAR")," - Same as ",(0,r.kt)("inlineCode",{parentName:"li"},"'P1Y'")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"ALL TIME")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"ALL")," - Alias for ",(0,r.kt)("inlineCode",{parentName:"li"},"ALL TIME"))),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15836/"},"#15836")),(0,r.kt)("h4",{id:"improved-catalog-tables"},"Improved catalog tables"),(0,r.kt)("p",null,"You can validate complex target column types against source input expressions during DML INSERT/REPLACE operations."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16223"},"#16223")),(0,r.kt)("p",null,"You can now define catalog tables without explicit segment granularities.\nDML queries on such tables need to have the PARTITIONED BY clause specified.\nAlternatively, you can update the table to include a defined segment granularity for DML queries to be validated properly."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16278"},"#16278")),(0,r.kt)("h4",{id:"double-and-null-values-in-sql-type-array"},"Double and null values in SQL type ARRAY"),(0,r.kt)("p",null,"You can now pass double and null values in SQL type ARRAY through dynamic parameters."),(0,r.kt)("p",null,"For example:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'"parameters": [\n {\n "type": "ARRAY",\n "value": [d1, d2, null]\n }\n]\n')),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16274"},"#16274")),(0,r.kt)("h4",{id:"typedinfilter-filter"},(0,r.kt)("inlineCode",{parentName:"h4"},"TypedInFilter")," filter"),(0,r.kt)("p",null,"Added a new ",(0,r.kt)("inlineCode",{parentName:"p"},"TypedInFilter")," filter to replace ",(0,r.kt)("inlineCode",{parentName:"p"},"InDimFilter"),"\u2014","to improve performance when matching numeric columns."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16039"},"#16039")),(0,r.kt)("p",null,(0,r.kt)("inlineCode",{parentName:"p"},"TypedInFilter")," can run in replace-with-default mode."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16233"},"#16233")),(0,r.kt)("h4",{id:"heap-dictionaries-clear-out"},"Heap dictionaries clear out"),(0,r.kt)("p",null,"Improved object handling to reduce the chances of running out of memory with Group By queries on high cardinality data."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16114"},"#16114")),(0,r.kt)("h4",{id:"other-querying-improvements"},"Other querying improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Added support for numeric arrays to window functions and subquery materializations ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15917"},"#15917")),(0,r.kt)("li",{parentName:"ul"},"Added support for single value aggregated groupBy queries for scalars ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15700"},"#15700")),(0,r.kt)("li",{parentName:"ul"},"Added support for column reordering with scan and sort style queries ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15815"},"#15815")),(0,r.kt)("li",{parentName:"ul"},"Added support for using MV_FILTER_ONLY and MV_FILTER_NONE functions with a non-literal argument ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16113"},"#16113")),(0,r.kt)("li",{parentName:"ul"},"Added the ",(0,r.kt)("inlineCode",{parentName:"li"},"radiusUnit")," element to the ",(0,r.kt)("inlineCode",{parentName:"li"},"radius")," bound ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16029"},"#16029")),(0,r.kt)("li",{parentName:"ul"},"Fixed the return type for the IPV4_PARSE function. The function now correctly returns null if the string literal can't be represented as an IPv4 address ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15916"},"#15916")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue where several aggregators returned UNKNOWN or OTHER as their SQL type inference ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16216"},"#16216")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue where triggering a math expression processor on a segment that lacks a specific column results in an ",(0,r.kt)("inlineCode",{parentName:"li"},"Unable to vectorize expression")," exception ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16128"},"#16128")),(0,r.kt)("li",{parentName:"ul"},"Fixed error while loading lookups from an empty JDBC source ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16307"},"#16307")),(0,r.kt)("li",{parentName:"ul"},"Fixed ",(0,r.kt)("inlineCode",{parentName:"li"},"ColumnType")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"RelDataType")," conversion for nested arrays ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16138"},"#16138")),(0,r.kt)("li",{parentName:"ul"},"Fixed ",(0,r.kt)("inlineCode",{parentName:"li"},"WindowingscanAndSort")," query issues on top of Joins ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15996"},"#15996")),(0,r.kt)("li",{parentName:"ul"},"Fixed ",(0,r.kt)("inlineCode",{parentName:"li"},"REGEXP_LIKE"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"CONTAINS_STRING"),", and ",(0,r.kt)("inlineCode",{parentName:"li"},"ICONTAINS_STRING")," so that they correctly return null for null value inputs in ANSI SQL compatible null handling mode (the default configuration). Previously, they returned false ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15963"},"#15963")),(0,r.kt)("li",{parentName:"ul"},"Fixed issues with ",(0,r.kt)("inlineCode",{parentName:"li"},"ARRAY_CONTAINS")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"ARRAY_OVERLAP")," with null left side arguments as well as ",(0,r.kt)("inlineCode",{parentName:"li"},"MV_CONTAINS")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"MV_OVERLAP")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15974"},"#15974")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue which can occur when using schema auto-discovery on columns with a mix of array and scalar values and querying with scan queries ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16105"},"#16105")),(0,r.kt)("li",{parentName:"ul"},"Fixed windowed aggregates so that they update the aggregation value based on the final compute ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16244"},"#16244")),(0,r.kt)("li",{parentName:"ul"},"Fixed issues with the first/last vector aggregators ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16230"},"#16230")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue where groupBy queries that have ",(0,r.kt)("inlineCode",{parentName:"li"},"bit_xor() is null")," return the wrong result ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16237"},"#16237")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue where Broker merge buffers get into a deadlock when multiple simultaneous queries use them ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15420"},"#15420")),(0,r.kt)("li",{parentName:"ul"},"Fixed a mapping issue in window functions where two nodes get the same reference ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16301"},"#16301")),(0,r.kt)("li",{parentName:"ul"},"Improved processing of index backed OR expressions ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16300"},"#16300")),(0,r.kt)("li",{parentName:"ul"},"Improved performance for real-time queries using the MSQ task engine. Segments served by the same server are now grouped together, resulting in more efficient query handling ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15399"},"#15399")),(0,r.kt)("li",{parentName:"ul"},"Improved strict NON NULL return type checks ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16279"},"#16279")),(0,r.kt)("li",{parentName:"ul"},"Improved array handling for Booleans to account for queries such as ",(0,r.kt)("inlineCode",{parentName:"li"},"select array[true, false] from datasource")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16093"},"#16093")),(0,r.kt)("li",{parentName:"ul"},"Improved how scalars work in arrays ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16311"},"#16311")),(0,r.kt)("li",{parentName:"ul"},"Improved LIKE filtering performance with multiple wildcards by not using ",(0,r.kt)("inlineCode",{parentName:"li"},"java.util.regex.Pattern")," to match ",(0,r.kt)("inlineCode",{parentName:"li"},"%")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16153"},"#16153")),(0,r.kt)("li",{parentName:"ul"},"Modified the ",(0,r.kt)("inlineCode",{parentName:"li"},"IndexedTable")," to reject building the index on the complex types to prevent joining on complex types ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16349"},"#16349")),(0,r.kt)("li",{parentName:"ul"},"Restored ",(0,r.kt)("inlineCode",{parentName:"li"},"enableWindowing")," context parameter for window functions ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16229"},"#16229"))),(0,r.kt)("h3",{id:"cluster-management"},"Cluster management"),(0,r.kt)("h4",{id:"improved-retrieving-active-task-status"},"Improved retrieving active task status"),(0,r.kt)("p",null,"Improved performance of the Overlord API ",(0,r.kt)("inlineCode",{parentName:"p"},"/indexer/v1/taskStatus")," by serving status of active tasks from memory rather than querying the metadata."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15724"},"#15724")),(0,r.kt)("h4",{id:"other-cluster-management-improvements"},"Other cluster management improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Adjusted salt size for ",(0,r.kt)("inlineCode",{parentName:"li"},"Pac4jSessionStore")," to 128 bits, which is FIPS compliant ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15758"},"#15758")),(0,r.kt)("li",{parentName:"ul"},"Improved Connection Count server select strategy to account for slow connection requests ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15975"},"#15975"))),(0,r.kt)("h3",{id:"data-management"},"Data management"),(0,r.kt)("h4",{id:"changes-to-coordinator-default-values"},"Changes to Coordinator default values"),(0,r.kt)("p",null,"Changed to the default values for the Coordinator service as follows:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"The default value of ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.kill.period")," has been changed from ",(0,r.kt)("inlineCode",{parentName:"li"},"P1D")," to the runtime value of ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.period.indexingPeriod"),". This default value can be overridden by explicitly specifying ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.kill.period")," in the Coordinator runtime properties."),(0,r.kt)("li",{parentName:"ul"},"The default value for the dynamic configuration property ",(0,r.kt)("inlineCode",{parentName:"li"},"killTaskSlotRatio")," has been updated from ",(0,r.kt)("inlineCode",{parentName:"li"},"1.0")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"0.1"),". This ensures that kill tasks take up at least one task slot and at most 10% of all available task slots by default.")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16247"},"#16247")),(0,r.kt)("h4",{id:"compaction-completion-reports"},"Compaction completion reports"),(0,r.kt)("p",null,"Parallel compaction task completion reports now have ",(0,r.kt)("inlineCode",{parentName:"p"},"segmentsRead")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"segmentsPublished")," fields to show how effective a compaction task is."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15947"},"#15947")),(0,r.kt)("h4",{id:"googletasklogs-upload-buffer-size"},(0,r.kt)("inlineCode",{parentName:"h4"},"GoogleTaskLogs")," upload buffer size"),(0,r.kt)("p",null,"Changed the upload buffer size in ",(0,r.kt)("inlineCode",{parentName:"p"},"GoogleTaskLogs")," to 1 MB instead of 15 MB to allow more uploads in parallel and prevent the MiddleManager service from running out of memory."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16236"},"#16236")),(0,r.kt)("h4",{id:"other-data-management-improvements"},"Other data management improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Improved compaction task reports. They can now contain multiple sets of segment output reports instead of overwriting previous reports ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15981/"},"#15981")),(0,r.kt)("li",{parentName:"ul"},"Improved segment killing in Azure to be faster ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15770"},"#15770")),(0,r.kt)("li",{parentName:"ul"},"Improved the retry behavior for deep storage connections ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15938"},"#15938")),(0,r.kt)("li",{parentName:"ul"},"Improved segment creation so that all segments created in a batch have the same ",(0,r.kt)("inlineCode",{parentName:"li"},"created_date")," entry, which can help in troubleshooting ingestion issues ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15977"},"#15977")),(0,r.kt)("li",{parentName:"ul"},"Improved how Druid parses JSON by using ",(0,r.kt)("inlineCode",{parentName:"li"},"charsetFix")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16212"},"#16212"))),(0,r.kt)("h3",{id:"metrics-and-monitoring"},"Metrics and monitoring"),(0,r.kt)("h4",{id:"new-unused-segment-metric"},"New unused segment metric"),(0,r.kt)("p",null,"You can now use the ",(0,r.kt)("inlineCode",{parentName:"p"},"kill/eligibleUnusedSegments/count")," metric to find the number of unused segments of a datasource that are identified as eligible for deletion from the metadata store by the Coordinator."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15941"},"#15941")," ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15977"},"#15977")),(0,r.kt)("h4",{id:"kafka-emitter-improvements"},"Kafka emitter improvements"),(0,r.kt)("p",null,"You can now set custom dimensions for events emitted by the Kafka emitter as a JSON map for the ",(0,r.kt)("inlineCode",{parentName:"p"},"druid.emitter.kafka.extra.dimensions")," property.\nFor example, ",(0,r.kt)("inlineCode",{parentName:"p"},'druid.emitter.kafka.extra.dimensions={"region":"us-east-1","environment":"preProd"}'),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15845"},"#15845")),(0,r.kt)("h4",{id:"prometheus-emitter-improvements"},"Prometheus emitter improvements"),(0,r.kt)("p",null,"The Prometheus emitter extension now emits ",(0,r.kt)("inlineCode",{parentName:"p"},"service/heartbeat")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"zk-connected")," metrics."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16209"},"#16209")),(0,r.kt)("p",null,"Also added the following missing metrics to the default Prometheus emitter mapping: ",(0,r.kt)("inlineCode",{parentName:"p"},"query/timeout/count"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"mergeBuffer/pendingRequests"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"ingest/events/processedWithError"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"ingest/notices/queueSize")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"segment/count"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16329"},"#16329")),(0,r.kt)("h4",{id:"statsd-emitter-improvements"},"StatsD emitter improvements"),(0,r.kt)("p",null,"You can now configure ",(0,r.kt)("inlineCode",{parentName:"p"},"queueSize"),",",(0,r.kt)("inlineCode",{parentName:"p"},"poolSize"),",",(0,r.kt)("inlineCode",{parentName:"p"},"processorWorkers"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"senderWorkers")," parameters for the StatsD emitter.\nUse these parameters to increase the capacity of the StatsD client when its queue size is full."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16283"},"#16283")),(0,r.kt)("h4",{id:"improved-segmentunavailablecount-metric"},"Improved ",(0,r.kt)("inlineCode",{parentName:"h4"},"segment/unavailable/count")," metric"),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"segment/unavailable/count")," metric now accounts for segments that can be queried from deep storage (",(0,r.kt)("inlineCode",{parentName:"p"},"replicaCount=0"),")."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16020"},"#16020")),(0,r.kt)("p",null,"Added a new metric ",(0,r.kt)("inlineCode",{parentName:"p"},"segment/deepStorage/count")," to support the query from deep storage feature."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16072"},"#16072")),(0,r.kt)("h4",{id:"other-metrics-and-monitoring-improvements"},"Other metrics and monitoring improvements"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Added a new ",(0,r.kt)("inlineCode",{parentName:"li"},"task/autoScaler/requiredCount")," metric that provides a count of required tasks based on the calculations of the ",(0,r.kt)("inlineCode",{parentName:"li"},"lagBased")," autoscaler. Compare that value to ",(0,r.kt)("inlineCode",{parentName:"li"},"task/running/count")," to discover the difference between the current and desired task counts ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16199"},"#16199")),(0,r.kt)("li",{parentName:"ul"},"Added ",(0,r.kt)("inlineCode",{parentName:"li"},"jvmVersion")," dimension to the ",(0,r.kt)("inlineCode",{parentName:"li"},"JvmMonitor")," module ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16262"},"#16262")),(0,r.kt)("li",{parentName:"ul"},"Exposed Kinesis lag metrics for use in alerts ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16172"},"#16172")),(0,r.kt)("li",{parentName:"ul"},"Fixed an issue with metric emission in the segment generation phase ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16146"},"#16146"))),(0,r.kt)("h3",{id:"extensions"},"Extensions"),(0,r.kt)("h4",{id:"microsoft-azure-improvements"},"Microsoft Azure improvements"),(0,r.kt)("p",null,"You can now use ingestion payloads larger than 1 MB for Azure."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15695"},"#15695")),(0,r.kt)("h4",{id:"kubernetes-improvements"},"Kubernetes improvements"),(0,r.kt)("p",null,"You can now configure the CPU cores for Peons (Kubernetes jobs) using the Overlord property ",(0,r.kt)("inlineCode",{parentName:"p"},"druid.indexer.runner.cpuCoreInMicro"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16008"},"#16008")),(0,r.kt)("h4",{id:"delta-lake-improvements"},"Delta Lake improvements"),(0,r.kt)("p",null,"You can use these filters to filter out data files from a snapshot, reducing the number of files Druid has to ingest from a Delta table."),(0,r.kt)("p",null,"For more information, see ",(0,r.kt)("a",{parentName:"p",href:"/docs/latest/ingestion/input-sources#delta-filter-object"},"Delta filter object"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16288"},"#16288")),(0,r.kt)("p",null,"Also added a text box for the Delta Lake filter to the web console.\nThe text box accepts an optional JSON object that is passed down as the ",(0,r.kt)("inlineCode",{parentName:"p"},"filter")," to the delta input source."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16379"},"#16379")),(0,r.kt)("h4",{id:"improve-performance-of-ldap-credentials-validator"},"Improve performance of LDAP credentials validator"),(0,r.kt)("p",null,"Improved performance of LDAP credentials validator by keeping password hashes in an in-memory cache. This helps avoid re-computation of password hashes, thus speeding up the process of LDAP-based Druid authentication."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15993"},"#15993")),(0,r.kt)("h2",{id:"upgrade-notes-and-incompatible-changes"},"Upgrade notes and incompatible changes"),(0,r.kt)("h3",{id:"upgrade-notes"},"Upgrade notes"),(0,r.kt)("h4",{id:"append-jsonpath-function"},"Append JsonPath function"),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"append")," function for JsonPath for ORC format now fails with an exception. Previously, it would run but not append anything."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15772"},"#15772")),(0,r.kt)("h4",{id:"kinesis-ingestion-tuning"},"Kinesis ingestion tuning"),(0,r.kt)("p",null,"The following properties have been deprecated as part of simplifying the memory tuning for Kinesis ingestion:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"recordBufferSize"),", use ",(0,r.kt)("inlineCode",{parentName:"li"},"recordBufferSizeBytes")," instead"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"maxRecordsPerPoll"),", use ",(0,r.kt)("inlineCode",{parentName:"li"},"maxBytesPerPoll")," instead")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15360"},"#15360")),(0,r.kt)("h4",{id:"improved-supervisor-rolling-restarts"},"Improved Supervisor rolling restarts"),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"stopTaskCount")," config now prioritizes stopping older tasks first. As part of this change, you must also explicitly set a value for ",(0,r.kt)("inlineCode",{parentName:"p"},"stopTaskCount"),". It no longer defaults to the same value as ",(0,r.kt)("inlineCode",{parentName:"p"},"taskCount"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15859"},"#15859")),(0,r.kt)("h4",{id:"changes-to-coordinator-default-values-1"},"Changes to Coordinator default values"),(0,r.kt)("p",null,"Changed the following default values for the Coordinator service:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"The default value for ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.kill.period")," (if unspecified) has changed from ",(0,r.kt)("inlineCode",{parentName:"li"},"P1D")," to the value of ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.period.indexingPeriod"),". Operators can choose to override ",(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.kill.period")," and that takes precedence over the default behavior."),(0,r.kt)("li",{parentName:"ul"},"The default value for the dynamic configuration property ",(0,r.kt)("inlineCode",{parentName:"li"},"killTaskSlotRatio")," has been updated from ",(0,r.kt)("inlineCode",{parentName:"li"},"1.0")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"0.1"),". This ensures that kill tasks take up only one task slot by default instead of consuming all available task slots.")),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16247"},"#16247")),(0,r.kt)("h4",{id:"googletasklogs-upload-buffer-size-1"},(0,r.kt)("inlineCode",{parentName:"h4"},"GoogleTaskLogs")," upload buffer size"),(0,r.kt)("p",null,"Changed the upload buffer size in ",(0,r.kt)("inlineCode",{parentName:"p"},"GoogleTaskLogs")," to 1 MB instead of 15 MB to allow more uploads in parallel and prevent the MiddleManager service from running out of memory."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16236"},"#16236")),(0,r.kt)("h3",{id:"incompatible-changes"},"Incompatible changes"),(0,r.kt)("h4",{id:"changes-to-targetdatasource-in-explain-queries"},"Changes to ",(0,r.kt)("inlineCode",{parentName:"h4"},"targetDataSource")," in EXPLAIN queries"),(0,r.kt)("p",null,"Druid 30.0.0 includes a breaking change that restores the behavior for ",(0,r.kt)("inlineCode",{parentName:"p"},"targetDataSource")," to its 28.0.0 and earlier state, different from Druid 29.0.0 and only 29.0.0. In 29.0.0, ",(0,r.kt)("inlineCode",{parentName:"p"},"targetDataSource")," returns a JSON object that includes the datasource name. In all other versions, ",(0,r.kt)("inlineCode",{parentName:"p"},"targetDataSource")," returns a string containing the name of the datasource."),(0,r.kt)("p",null,"If you're upgrading from any version other than 29.0.0, there is no change in behavior."),(0,r.kt)("p",null,"If you are upgrading from 29.0.0, this is an incompatible change."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16004"},"#16004")),(0,r.kt)("h4",{id:"removed-zookeeper-based-segment-loading-1"},"Removed ZooKeeper-based segment loading"),(0,r.kt)("p",null,"ZooKeeper-based segment loading is being removed due to known issues.\nIt has been deprecated for several releases.\nRecent improvements to the Druid Coordinator have significantly enhanced performance with HTTP-based segment loading."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15705"},"#15705")),(0,r.kt)("h4",{id:"removed-coordinator-configs"},"Removed Coordinator configs"),(0,r.kt)("p",null,"Removed the following Coordinator configs:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.load.timeout"),": Not needed as the default value of this parameter (15 minutes) is known to work well for all clusters."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.loadqueuepeon.type"),": Not needed as this value is always ",(0,r.kt)("inlineCode",{parentName:"li"},"http"),"."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.coordinator.curator.loadqueuepeon.numCallbackThreads"),": Not needed as ZooKeeper(curator)-based segment loading isn't an option anymore.")),(0,r.kt)("p",null,"Auto-cleanup of compaction configs of inactive datasources is now enabled by default."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15705"},"#15705")),(0,r.kt)("h4",{id:"changed-usemaxmemoryestimates-for-hadoop-jobs"},"Changed ",(0,r.kt)("inlineCode",{parentName:"h4"},"useMaxMemoryEstimates")," for Hadoop jobs"),(0,r.kt)("p",null,"The default value of the ",(0,r.kt)("inlineCode",{parentName:"p"},"useMaxMemoryEstimates")," parameter for Hadoop jobs is now ",(0,r.kt)("inlineCode",{parentName:"p"},"false"),"."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16280"},"#16280")),(0,r.kt)("h3",{id:"developer-notes"},"Developer notes"),(0,r.kt)("h4",{id:"dependency-updates"},"Dependency updates"),(0,r.kt)("p",null,"The following dependencies have had their versions bumped:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Updated Azure POM from 1.2.19 to 1.2.23 to update transitive dependency ",(0,r.kt)("inlineCode",{parentName:"li"},"nimbus-jose-jwt")," to address ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-52428")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16374"},"#16374")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"commons-configuration2")," from 2.8.0 to 2.10.1 to address ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-29131")," and ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-29133")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16374"},"#16374")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"bcpkix-jdk18on")," from 1.76 to 1.78.1 to address ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-30172"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-30171"),", and ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-29857")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16374"},"#16374")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"nimbus-jose-jwt")," from 8.22.1 to 9.37.2 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16320"},"#16320")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"rewrite-maven-plugin")," from 5.23.1 to 5.27.0 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16238"},"#16238")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"rewrite-testing-frameworks")," from 2.4.1 to 2.6.0 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16238"},"#16238")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"json-path")," from 2.3.0 to 2.9.0"),(0,r.kt)("li",{parentName:"ul"},"Updated Apache Delta Lake from 3.0.0 to 3.1.0"),(0,r.kt)("li",{parentName:"ul"},"Updated Netty to ",(0,r.kt)("inlineCode",{parentName:"li"},"4.1.108.Final")," to address ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-29025")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16267"},"#16267")),(0,r.kt)("li",{parentName:"ul"},"Updated Apache ZooKeeper to 3.8.4 to address ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2024-23944")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16267"},"#16267")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"log4j.version")," from 2.18.0 to 2.22.1 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15934"},"#15934")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"org.apache.commons.commons-compress")," from 1.24.0 to 1.26.0 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16009"},"#16009")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"org.apache.commons.commons-codec")," from 1.16.0 to 1.16.1 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16009"},"#16009")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"org.bitbucket.b_c:jose4j")," from 0.9.3 to 0.9.6 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16078"},"#16078")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"redis.clients:jedis")," from 5.0.2 to 5.1.2 ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16074"},"#16074")),(0,r.kt)("li",{parentName:"ul"},"Updated Jetty from ",(0,r.kt)("inlineCode",{parentName:"li"},"9.4.53.v20231009")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"9.4.54.v20240208")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16000"},"#16000")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"webpackdevmiddleware")," from 5.3.3 to 5.3.4 in web console ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16195"},"#16195")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"express")," from 4.18.2 to 4.19.2 in web console ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16204"},"#16204")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"druid-toolkit/query")," from 0.21.9 to 0.22.11 in web console ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16213"},"#16213")),(0,r.kt)("li",{parentName:"ul"},"Updated ",(0,r.kt)("inlineCode",{parentName:"li"},"follow-redirects")," from 1.15.1 to 1.15.4 in web console ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16134"},"#16134")),(0,r.kt)("li",{parentName:"ul"},"Updated Axios from 0.26.1 to 0.28.0 in web console ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16087"},"#16087")),(0,r.kt)("li",{parentName:"ul"},"Removed the ",(0,r.kt)("inlineCode",{parentName:"li"},"aws-sdk")," transitive dependency to reduce the size of the compiled Ranger extension ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16011"},"#16011")),(0,r.kt)("li",{parentName:"ul"},"Removed end of life ",(0,r.kt)("inlineCode",{parentName:"li"},"log4j v1")," dependencies ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/15984"},"#15984")),(0,r.kt)("li",{parentName:"ul"},"Suppressed errors for the following CVEs: ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-52428(7.5)"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-50291(7.5)"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-50298(7.5)"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-50386(8.8)"),", and ",(0,r.kt)("inlineCode",{parentName:"li"},"CVE-2023-50292(7.5)")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/16147"},"#16147"))))}c.isMDXComponent=!0},97449:(e,t,a)=>{a.d(t,{Z:()=>n});const n=a.p+"assets/images/30.0.0-console-search-3e81d37eccd76b2563d243ad0e7d6f8b.png"},93860:(e,t,a)=>{a.d(t,{Z:()=>n});const n=a.p+"assets/images/30.0.0-sampler-lookups-482179622c3ccc20196b0c44a3a781c9.png"},12501:(e,t,a)=>{a.d(t,{Z:()=>n});const n=a.p+"assets/images/30.0.0-supervisors-93717f6a0e8e8789349b10f04709158e.png"}}]);