"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[2703],{3905:(e,t,a)=>{a.d(t,{Zo:()=>u,kt:()=>h});var n=a(67294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function o(e){for(var t=1;t<arguments.length;t++){var a=null!=arguments[t]?arguments[t]:{};t%2?i(Object(a),!0).forEach((function(t){r(e,t,a[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(a)):i(Object(a)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(a,t))}))}return e}function l(e,t){if(null==e)return{};var a,n,r=function(e,t){if(null==e)return{};var a,n,r={},i=Object.keys(e);for(n=0;n<i.length;n++)a=i[n],t.indexOf(a)>=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n<i.length;n++)a=i[n],t.indexOf(a)>=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var s=n.createContext({}),d=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},u=function(e){var t=d(e.components);return n.createElement(s.Provider,{value:t},e.children)},p="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,s=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),p=d(a),c=r,h=p["".concat(s,".").concat(c)]||p[c]||m[c]||i;return a?n.createElement(h,o(o({ref:t},u),{},{components:a})):n.createElement(h,o({ref:t},u))}));function h(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,o=new Array(i);o[0]=c;var l={};for(var s in t)hasOwnProperty.call(t,s)&&(l[s]=t[s]);l.originalType=e,l[p]="string"==typeof e?e:r,o[1]=l;for(var d=2;d<i;d++)o[d]=a[d];return n.createElement.apply(null,o)}return n.createElement.apply(null,a)}c.displayName="MDXCreateElement"},59037:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>u,contentTitle:()=>s,default:()=>h,frontMatter:()=>l,metadata:()=>d,toc:()=>p});var n=a(87462),r=a(63366),i=(a(67294),a(3905)),o=["components"],l={id:"upgrade-notes",title:"Upgrade notes"},s=void 0,d={unversionedId:"release-info/upgrade-notes",id:"release-info/upgrade-notes",title:"Upgrade notes",description:"\x3c!--",source:"@site/docs/latest/release-info/upgrade-notes.md",sourceDirName:"release-info",slug:"/release-info/upgrade-notes",permalink:"/docs/latest/release-info/upgrade-notes",draft:!1,tags:[],version:"current",frontMatter:{id:"upgrade-notes",title:"Upgrade notes"},sidebar:"docs",previous:{title:"Release notes",permalink:"/docs/latest/release-info/release-notes"}},u={},p=[{value:"29.0.1",id:"2901",level:2},{value:"Incompatible changes",id:"incompatible-changes",level:3},{value:"Changes to <code>targetDataSource</code> in EXPLAIN queries",id:"changes-to-targetdatasource-in-explain-queries",level:4},{value:"29.0.0",id:"2900",level:2},{value:"Upgrade notes",id:"upgrade-notes",level:3},{value:"Changed <code>equals</code> filter for native queries",id:"changed-equals-filter-for-native-queries",level:4},{value:"Console automatically sets <code>arrayIngestMode</code> for MSQ queries",id:"console-automatically-sets-arrayingestmode-for-msq-queries",level:4},{value:"Improved concurrent append and 
replace (experimental)",id:"improved-concurrent-append-and-replace-experimental",level:4},{value:"Enabled empty ingest queries",id:"enabled-empty-ingest-queries",level:4},{value:"Enabled query request queuing by default when total laning is turned on",id:"enabled-query-request-queuing-by-default-when-total-laning-is-turned-on",level:4},{value:"Changed how empty or null array columns are stored",id:"changed-how-empty-or-null-array-columns-are-stored",level:4},{value:"Changed how Druid allocates weekly segments",id:"changed-how-druid-allocates-weekly-segments",level:4},{value:"Removed the <code>auto</code> search strategy",id:"removed-the-auto-search-strategy",level:4},{value:"28.0.0",id:"2800",level:2},{value:"Upgrade notes",id:"upgrade-notes-1",level:3},{value:"Upgrade Druid segments table",id:"upgrade-druid-segments-table",level:4},{value:"Upgrade step for MySQL",id:"upgrade-step-for-mysql",level:5},{value:"Upgrade step for PostgreSQL",id:"upgrade-step-for-postgresql",level:5},{value:"Manual upgrade step",id:"manual-upgrade-step",level:5},{value:"Recommended syntax for SQL UNNEST",id:"recommended-syntax-for-sql-unnest",level:4},{value:"Dynamic parameters",id:"dynamic-parameters",level:4},{value:"Nested column format",id:"nested-column-format",level:4},{value:"SQL compatibility",id:"sql-compatibility",level:4},{value:"NULL filters",id:"null-filters",level:5},{value:"COUNT functions",id:"count-functions",level:5},{value:"GroupBy queries",id:"groupby-queries",level:5},{value:"Stop Supervisors that ingest from multiple Kafka topics before downgrading",id:"stop-supervisors-that-ingest-from-multiple-kafka-topics-before-downgrading",level:4},{value:"<code>lenientAggregatorMerge</code> deprecated",id:"lenientaggregatormerge-deprecated",level:4},{value:"Broker parallel merge config options",id:"broker-parallel-merge-config-options",level:4},{value:"Ingestion options for ARRAY typed columns",id:"ingestion-options-for-array-typed-columns",level:4},{value:"Incompatible changes",id:"incompatible-changes-1",level:3},{value:"Removed Hadoop 2",id:"removed-hadoop-2",level:4},{value:"Removed GroupBy v1",id:"removed-groupby-v1",level:4},{value:"Removed Coordinator dynamic configs",id:"removed-coordinator-dynamic-configs",level:4},{value:"Removed <code>cachingCost</code> strategy",id:"removed-cachingcost-strategy",level:4},{value:"Removed <code>InsertCannotOrderByDescending</code>",id:"removed-insertcannotorderbydescending",level:4},{value:"Removed the backward compatibility code for the Handoff API",id:"removed-the-backward-compatibility-code-for-the-handoff-api",level:4},{value:"27.0.0",id:"2700",level:2},{value:"Upgrade notes",id:"upgrade-notes-2",level:3},{value:"Worker input bytes for SQL-based ingestion",id:"worker-input-bytes-for-sql-based-ingestion",level:4},{value:"Parameter execution changes for Kafka",id:"parameter-execution-changes-for-kafka",level:4},{value:"Hadoop 2 deprecated",id:"hadoop-2-deprecated",level:4},{value:"GroupBy v1 deprecated",id:"groupby-v1-deprecated",level:4},{value:"Push-based real-time ingestion deprecated",id:"push-based-real-time-ingestion-deprecated",level:4},{value:"<code>cachingCost</code> segment balancing strategy deprecated",id:"cachingcost-segment-balancing-strategy-deprecated",level:4},{value:"Segment loading config changes",id:"segment-loading-config-changes",level:4},{value:"<code>SysMonitor</code> support deprecated",id:"sysmonitor-support-deprecated",level:4},{value:"Incompatible changes",id:"incompatible-changes-2",level:3},{value:"Removed property for setting 
max bytes for dimension lookup cache",id:"removed-property-for-setting-max-bytes-for-dimension-lookup-cache",level:4},{value:"Removed Coordinator dynamic configs",id:"removed-coordinator-dynamic-configs-1",level:4},{value:"26.0.0",id:"2600",level:2},{value:"Upgrade notes",id:"upgrade-notes-3",level:3},{value:"Real-time tasks",id:"real-time-tasks",level:4},{value:"Incompatible changes",id:"incompatible-changes-3",level:3},{value:"Firehose ingestion removed",id:"firehose-ingestion-removed",level:4},{value:"Information schema now uses numeric column types",id:"information-schema-now-uses-numeric-column-types",level:4},{value:"<code>frontCoded</code> segment format change",id:"frontcoded-segment-format-change",level:4},{value:"25.0.0",id:"2500",level:2},{value:"Upgrade notes",id:"upgrade-notes-4",level:3},{value:"Default HTTP-based segment discovery and task management",id:"default-http-based-segment-discovery-and-task-management",level:4},{value:"Finalizing HLL and quantiles sketch aggregates",id:"finalizing-hll-and-quantiles-sketch-aggregates",level:4},{value:"Kill tasks mark segments as unused only if specified",id:"kill-tasks-mark-segments-as-unused-only-if-specified",level:4},{value:"Incompatible changes",id:"incompatible-changes-4",level:3},{value:"Upgrade curator to 5.3.0",id:"upgrade-curator-to-530",level:4},{value:"Fixed Parquet list conversion",id:"fixed-parquet-list-conversion",level:4},{value:"24.0.0",id:"2400",level:2},{value:"Upgrade notes",id:"upgrade-notes-5",level:3},{value:"Permissions for multi-stage query engine",id:"permissions-for-multi-stage-query-engine",level:4},{value:"Behavior for unused segments",id:"behavior-for-unused-segments",level:4},{value:"Default for <code>druid.processing.fifo</code>",id:"default-for-druidprocessingfifo",level:4},{value:"Update to JDBC statement closure",id:"update-to-jdbc-statement-closure",level:4},{value:"0.23.0",id:"0230",level:2},{value:"Upgrade notes",id:"upgrade-notes-6",level:3},{value:"Auto-killing of segments",id:"auto-killing-of-segments",level:4},{value:"Other changes",id:"other-changes",level:4},{value:"0.22.0",id:"0220",level:2},{value:"Upgrade notes",id:"upgrade-notes-7",level:3},{value:"Dropped support for Apache ZooKeeper 3.4",id:"dropped-support-for-apache-zookeeper-34",level:4},{value:"Native batch ingestion segment allocation fix",id:"native-batch-ingestion-segment-allocation-fix",level:4},{value:"SQL timeseries no longer skip empty buckets with all granularity",id:"sql-timeseries-no-longer-skip-empty-buckets-with-all-granularity",level:4},{value:"Druid reingestion incompatible changes",id:"druid-reingestion-incompatible-changes",level:4},{value:"Druid web-console no longer supports IE11 and other older browsers",id:"druid-web-console-no-longer-supports-ie11-and-other-older-browsers",level:4},{value:"Changed default maximum segment loading queue size",id:"changed-default-maximum-segment-loading-queue-size",level:4},{value:"0.21.0",id:"0210",level:2},{value:"Improved HTTP status codes for query errors",id:"improved-http-status-codes-for-query-errors",level:4},{value:"Query interrupted metric",id:"query-interrupted-metric",level:4},{value:"context dimension in query metrics",id:"context-dimension-in-query-metrics",level:4},{value:"Deprecated support for Apache ZooKeeper 3.4",id:"deprecated-support-for-apache-zookeeper-34",level:4},{value:"Consistent serialization format and column naming convention for the sys.segments 
table",id:"consistent-serialization-format-and-column-naming-convention-for-the-syssegments-table",level:4}],m={toc:p},c="wrapper";function h(e){var t=e.components,a=(0,r.Z)(e,o);return(0,i.kt)(c,(0,n.Z)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("p",null,"The upgrade notes assume that you are upgrading from the Druid version that immediately precedes your target version. If you are upgrading across multiple versions, make sure you read the upgrade notes for all the intermediate versions."),(0,i.kt)("p",null,"For the full release notes for a specific version, see the ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/releases"},"releases page"),"."),(0,i.kt)("h2",{id:"2901"},"29.0.1"),(0,i.kt)("h3",{id:"incompatible-changes"},"Incompatible changes"),(0,i.kt)("h4",{id:"changes-to-targetdatasource-in-explain-queries"},"Changes to ",(0,i.kt)("inlineCode",{parentName:"h4"},"targetDataSource")," in EXPLAIN queries"),(0,i.kt)("p",null,"Druid 29.0.1 includes a breaking change that restores the behavior for ",(0,i.kt)("inlineCode",{parentName:"p"},"targetDataSource")," to its 28.0.0 and earlier state, different from Druid 29.0.0 and only 29.0.0. In 29.0.0, ",(0,i.kt)("inlineCode",{parentName:"p"},"targetDataSource")," returns a JSON object that includes the datasource name. In all other versions, ",(0,i.kt)("inlineCode",{parentName:"p"},"targetDataSource")," returns a string containing the name of the datasource."),(0,i.kt)("p",null,"If you're upgrading from any version other than 29.0.0, there is no change in behavior."),(0,i.kt)("p",null,"If you are upgrading from 29.0.0, this is an incompatible change."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/16004"},"#16004")),(0,i.kt)("h2",{id:"2900"},"29.0.0"),(0,i.kt)("h3",{id:"upgrade-notes"},"Upgrade notes"),(0,i.kt)("h4",{id:"changed-equals-filter-for-native-queries"},"Changed ",(0,i.kt)("inlineCode",{parentName:"h4"},"equals")," filter for native queries"),(0,i.kt)("p",null,"The ",(0,i.kt)("a",{parentName:"p",href:"https://druid.apache.org/docs/latest/querying/filters#equality-filter"},"equality filter")," on mixed type ",(0,i.kt)("inlineCode",{parentName:"p"},"auto")," columns that contain arrays must now be filtered as their presenting type. This means that if any rows are arrays (for example, the segment metadata and ",(0,i.kt)("inlineCode",{parentName:"p"},"information_schema")," reports the type as some array type), then the native queries must also filter as if they are some array type."),(0,i.kt)("p",null,"This change impacts mixed type ",(0,i.kt)("inlineCode",{parentName:"p"},"auto")," columns that contain both scalars and arrays. It doesn't impact SQL, which already has this limitation due to how the type presents itself."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/15503"},"#15503")),(0,i.kt)("h4",{id:"console-automatically-sets-arrayingestmode-for-msq-queries"},"Console automatically sets ",(0,i.kt)("inlineCode",{parentName:"h4"},"arrayIngestMode")," for MSQ queries"),(0,i.kt)("p",null,"Druid console now configures the ",(0,i.kt)("inlineCode",{parentName:"p"},"arrayIngestMode")," parameter in the data loading flow, and its value can persist across the SQL tab unless manually updated. 
#### Enabled query request queuing by default when total laning is turned on

When query scheduler threads are less than server HTTP threads, total laning turns on.
This reserves some HTTP threads for non-query requests such as health checks.
Previously, total laning would reject any query request that exceeded the lane capacity.
Now, excess requests are instead queued with a timeout equal to `MIN(Integer.MAX_VALUE, druid.server.http.maxQueryTimeout)`.

[#15440](https://github.com/apache/druid/pull/15440)
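As a sketch, a Broker configuration like the following (values are illustrative) would turn total laning on, because scheduler threads are fewer than HTTP threads:

```
druid.server.http.numThreads=60
druid.query.scheduler.numThreads=50
druid.server.http.maxQueryTimeout=300000
```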
#### Changed how empty or null array columns are stored

Columns ingested with the auto column indexer that contain only empty or null arrays are now stored as `ARRAY<LONG>` instead of `COMPLEX<json>`.

[#15505](https://github.com/apache/druid/pull/15505)

#### Changed how Druid allocates weekly segments

When the requested granularity is a month or larger but a segment can't be allocated, Druid resorts to day partitioning.
Unless explicitly specified, Druid skips week-granularity segments for data partitioning because these segments don't align with the end of the month or more coarse-grained intervals.

Previously, if Druid couldn't allocate segments by month, it tried allocating them by week next.
In the new behavior, Druid skips partitioning by week and goes directly to day. Week segments can only be allocated if the chosen partitioning in the append task is WEEK.

[#15589](https://github.com/apache/druid/pull/15589)

#### Removed the `auto` search strategy

Removed the `auto` search strategy from the native search query. Setting `searchStrategy` to `auto` is now equivalent to `useIndexes`.

[#15550](https://github.com/apache/druid/pull/15550)
## 28.0.0

### Upgrade notes

#### Upgrade Druid segments table

Druid 28.0.0 adds a new column to the Druid metadata table that requires an update to the table.

If `druid.metadata.storage.connector.createTables` is set to `true` and the metadata store user has DDL privileges, the segments table gets automatically updated at startup to include the new `used_status_last_updated` column. No additional work is needed for the upgrade.

If either of those requirements is not met, pre-upgrade steps are required. You must make these updates before you upgrade to Druid 28.0.0, or the Coordinator and Overlord processes will fail.

Although you can manually alter your table to add the new `used_status_last_updated` column, Druid also provides a [CLI tool](https://druid.apache.org/docs/latest/operations/metadata-migration/#create-druid-tables) to do it.

[#12599](https://github.com/apache/druid/pull/12599) [#14868](https://github.com/apache/druid/pull/14868)

In the example commands below:

- `lib` is the Druid lib directory.
- `extensions` is the Druid extensions directory.
- `base` corresponds to the value of `druid.metadata.storage.tables.base` in the configuration, `druid` by default.
- The `--connectURI` parameter corresponds to the value of `druid.metadata.storage.connector.connectURI`.
- The `--user` parameter corresponds to the value of `druid.metadata.storage.connector.user`.
- The `--password` parameter corresponds to the value of `druid.metadata.storage.connector.password`.
- The `--action` parameter corresponds to the update action you are executing. In this case, it is `add-used-flag-last-updated-to-segments`.
##### Upgrade step for MySQL

```bash
cd ${DRUID_ROOT}
java -classpath "lib/*" -Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2.xml -Ddruid.extensions.directory="extensions" -Ddruid.extensions.loadList=[\"mysql-metadata-storage\"] -Ddruid.metadata.storage.type=mysql org.apache.druid.cli.Main tools metadata-update --connectURI="<mysql-uri>" --user USER --password PASSWORD --base druid --action add-used-flag-last-updated-to-segments
```

##### Upgrade step for PostgreSQL

```bash
cd ${DRUID_ROOT}
java -classpath "lib/*" -Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2.xml -Ddruid.extensions.directory="extensions" -Ddruid.extensions.loadList=[\"postgresql-metadata-storage\"] -Ddruid.metadata.storage.type=postgresql org.apache.druid.cli.Main tools metadata-update --connectURI="<postgresql-uri>" --user USER --password PASSWORD --base druid --action add-used-flag-last-updated-to-segments
```

##### Manual upgrade step

```sql
ALTER TABLE druid_segments
ADD used_status_last_updated varchar(255);
```

#### Recommended syntax for SQL UNNEST

The recommended syntax for SQL UNNEST has changed. We recommend using CROSS JOIN instead of commas for most queries to prevent issues with precedence. For example, use:

```sql
SELECT column_alias_name1 FROM datasource CROSS JOIN UNNEST(source_expression1) AS table_alias_name1(column_alias_name1) CROSS JOIN UNNEST(source_expression2) AS table_alias_name2(column_alias_name2), ...
```

Do not use:

```sql
SELECT column_alias_name FROM datasource, UNNEST(source_expression1) AS table_alias_name1(column_alias_name1), UNNEST(source_expression2) AS table_alias_name2(column_alias_name2), ...
```

#### Dynamic parameters

The Apache Calcite version has been upgraded from 1.21 to 1.35. As part of the Calcite upgrade, the behavior of type inference for dynamic parameters has changed. To avoid any type inference issues, explicitly `CAST` all dynamic parameters as a specific data type in SQL queries. For example, use:

```sql
SELECT (1 * CAST (? as DOUBLE))/2 as tmp
```

Do not use:

```sql
SELECT (1 * ?)/2 as tmp
```
#### Nested column format

`json` type columns created with Druid 28.0.0 are not backwards compatible with Druid versions older than 26.0.0.
If you are upgrading from a version prior to Druid 26.0.0 and you use `json` columns, upgrade to Druid 26.0.0 before you upgrade to Druid 28.0.0.
Additionally, to downgrade to a version older than Druid 26.0.0, any new segments created in Druid 28.0.0 should be re-ingested using Druid 26.0.0 or 27.0.0 prior to further downgrading.

When upgrading from a previous version, you can continue to write nested columns in a backwards compatible format (version 4).

In a classic batch ingestion job, include `formatVersion` in the `dimensions` list of the `dimensionsSpec` property. For example:

```json
"dimensionsSpec": {
  "dimensions": [
    "product",
    "department",
    {
      "type": "json",
      "name": "shipTo",
      "formatVersion": 4
    }
  ]
},
```

To set the default nested column version, set the desired format version in the common runtime properties. For example:

```
druid.indexing.formats.nestedColumnFormatVersion=4
```

#### SQL compatibility

Starting with Druid 28.0.0, the default way Druid treats nulls and booleans has changed.

For nulls, Druid now differentiates between an empty string and a record with no data as well as between an empty numerical record and `0`.
You can revert to the previous behavior by setting `druid.generic.useDefaultValueForNull` to `true`.

This property affects both storage and querying, and must be set on all Druid service types to be available at both ingestion time and query time. Reverting this setting to the old value restores the previous behavior without reingestion.

For booleans, Druid now strictly uses `1` (true) or `0` (false). Previously, true and false could be represented either as `true` and `false` or as `1` and `0`, respectively. In addition, Druid now returns a null value for boolean comparisons like `True && NULL`.

You can revert to the previous behavior by setting `druid.expressions.useStrictBooleans` to `false`.
This property affects both storage and querying, and must be set on all Druid service types to be available at both ingestion time and query time. Reverting this setting to the old value restores the previous behavior without reingestion.

The following table illustrates some example scenarios and the impact of the changes.
<details>
<summary>Show the table</summary>

| Query | Druid 27.0.0 and earlier | Druid 28.0.0 and later |
|-------|--------------------------|------------------------|
| Query empty string | Empty string (`''`) or null | Empty string (`''`) |
| Query null string | Null or empty | Null |
| COUNT(*) | All rows, including nulls | All rows, including nulls |
| COUNT(column) | All rows excluding empty strings | All rows including empty strings but excluding nulls |
| Expression 100 && 11 | 11 | 1 |
| Expression 100 \|\| 11 | 100 | 1 |
| Null FLOAT/DOUBLE column | 0.0 | Null |
| Null LONG column | 0 | Null |
| Null `__time` column | 0, meaning 1970-01-01 00:00:00 UTC | 1970-01-01 00:00:00 UTC |
| Null MVD column | `''` | Null |
| ARRAY | Null | Null |
| COMPLEX | none | Null |

</details>

Before upgrading to Druid 28.0.0, update your queries to account for the changed behavior as described in the following sections.
sections."),(0,i.kt)("h5",{id:"null-filters"},"NULL filters"),(0,i.kt)("p",null,"If your queries use NULL in the filter condition to match both nulls and empty strings, you should add an explicit filter clause for empty strings. For example, update ",(0,i.kt)("inlineCode",{parentName:"p"},"s IS NULL")," to ",(0,i.kt)("inlineCode",{parentName:"p"},"s IS NULL OR s = ''"),"."),(0,i.kt)("h5",{id:"count-functions"},"COUNT functions"),(0,i.kt)("p",null,(0,i.kt)("inlineCode",{parentName:"p"},"COUNT(column)")," now counts empty strings. If you want to continue excluding empty strings from the count, replace ",(0,i.kt)("inlineCode",{parentName:"p"},"COUNT(column)")," with ",(0,i.kt)("inlineCode",{parentName:"p"},"COUNT(column) FILTER(WHERE column <> '')"),"."),(0,i.kt)("h5",{id:"groupby-queries"},"GroupBy queries"),(0,i.kt)("p",null,"GroupBy queries on columns containing null values can now have additional entries as nulls can co-exist with empty strings."),(0,i.kt)("h4",{id:"stop-supervisors-that-ingest-from-multiple-kafka-topics-before-downgrading"},"Stop Supervisors that ingest from multiple Kafka topics before downgrading"),(0,i.kt)("p",null,"If you have added supervisors that ingest from multiple Kafka topics in Druid 28.0.0 or later, stop those supervisors before downgrading to a version prior to Druid 28.0.0 because the supervisors will fail in versions prior to Druid 28.0.0."),(0,i.kt)("h4",{id:"lenientaggregatormerge-deprecated"},(0,i.kt)("inlineCode",{parentName:"h4"},"lenientAggregatorMerge")," deprecated"),(0,i.kt)("p",null,(0,i.kt)("inlineCode",{parentName:"p"},"lenientAggregatorMerge")," property in segment metadata queries has been deprecated. It will be removed in future releases.\nUse ",(0,i.kt)("inlineCode",{parentName:"p"},"aggregatorMergeStrategy")," instead. ",(0,i.kt)("inlineCode",{parentName:"p"},"aggregatorMergeStrategy")," also supports the ",(0,i.kt)("inlineCode",{parentName:"p"},"latest")," and ",(0,i.kt)("inlineCode",{parentName:"p"},"earliest")," strategies in addition to ",(0,i.kt)("inlineCode",{parentName:"p"},"strict")," and ",(0,i.kt)("inlineCode",{parentName:"p"},"lenient")," strategies from ",(0,i.kt)("inlineCode",{parentName:"p"},"lenientAggregatorMerge"),"."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/14560"},"#14560"),"\n",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/14598"},"#14598")),(0,i.kt)("h4",{id:"broker-parallel-merge-config-options"},"Broker parallel merge config options"),(0,i.kt)("p",null,"The paths for ",(0,i.kt)("inlineCode",{parentName:"p"},"druid.processing.merge.pool.*")," and ",(0,i.kt)("inlineCode",{parentName:"p"},"druid.processing.merge.task.*")," have been flattened to use ",(0,i.kt)("inlineCode",{parentName:"p"},"druid.processing.merge.*")," instead. The legacy paths for the configs are now deprecated and will be removed in a future release. 
#### Broker parallel merge config options

The paths for `druid.processing.merge.pool.*` and `druid.processing.merge.task.*` have been flattened to use `druid.processing.merge.*` instead. The legacy paths for the configs are now deprecated and will be removed in a future release. Migrate your settings to use the new paths because the old paths will be ignored in the future.

[#14695](https://github.com/apache/druid/pull/14695)
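As a sketch of the migration, using a `parallelism` key as an assumed example of one such config:

```
# Deprecated legacy path
druid.processing.merge.pool.parallelism=8

# New flattened path
druid.processing.merge.parallelism=8
```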
#### Ingestion options for ARRAY typed columns

Starting with Druid 28.0.0, the MSQ task engine can detect and ingest arrays as ARRAY typed columns when you set the query context parameter `arrayIngestMode` to `array`.
The `arrayIngestMode` context parameter controls how ARRAY type values are stored in Druid segments.

When you set `arrayIngestMode` to `array` (recommended for SQL compliance), the MSQ task engine stores all ARRAY typed values in [ARRAY typed columns](https://druid.apache.org/docs/latest/querying/arrays) and supports storing both VARCHAR and numeric typed arrays.

For backwards compatibility, `arrayIngestMode` defaults to `mvd`. When `"arrayIngestMode":"mvd"`, Druid only supports VARCHAR typed arrays and stores them as [multi-value string columns](https://druid.apache.org/docs/latest/querying/multi-value-dimensions).

When you set `arrayIngestMode` to `none`, Druid throws an exception when trying to store any type of arrays.

For more information on how to ingest `ARRAY` typed columns with SQL-based ingestion, see [SQL data types](https://druid.apache.org/docs/latest/querying/sql-data-types#arrays) and [Array columns](https://druid.apache.org/docs/latest/querying/arrays).
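As a sketch, an MSQ ingest query opts in through its query context; the query and table names here are hypothetical:

```json
{
  "query": "INSERT INTO target SELECT __time, ARRAY[1, 2, 3] AS nums FROM source PARTITIONED BY DAY",
  "context": {
    "arrayIngestMode": "array"
  }
}
```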
### Incompatible changes

#### Removed Hadoop 2

Support for Hadoop 2 has been removed.
Migrate to SQL-based ingestion or JSON-based batch ingestion if you are using Hadoop 2.x for ingestion today.
If migrating to Druid's built-in ingestion is not possible, you must upgrade your Hadoop infrastructure to 3.x+ before upgrading to Druid 28.0.0.

[#14763](https://github.com/apache/druid/pull/14763)

#### Removed GroupBy v1

The GroupBy v1 engine has been removed. Use the GroupBy v2 engine instead, which has been the default GroupBy engine for several releases.
There should be no impact on your queries.

Additionally, `AggregatorFactory.getRequiredColumns` has been deprecated and will be removed in a future release. If you have an extension that implements `AggregatorFactory`, then this method should be removed from your implementation.

[#14866](https://github.com/apache/druid/pull/14866)

#### Removed Coordinator dynamic configs

The `decommissioningMaxPercentOfMaxSegmentsToMove` config has been removed.
The use case for this config is handled by smart segment loading now, which is enabled by default.

[#14923](https://github.com/apache/druid/pull/14923)

#### Removed `cachingCost` strategy

The `cachingCost` strategy for segment loading has been removed.
Use `cost` instead, which has the same benefits as `cachingCost`.

If you have `cachingCost` set, the system ignores this setting and automatically uses `cost`.

[#14798](https://github.com/apache/druid/pull/14798)

#### Removed `InsertCannotOrderByDescending`

The deprecated MSQ fault `InsertCannotOrderByDescending` has been removed.

[#14588](https://github.com/apache/druid/pull/14588)

#### Removed the backward compatibility code for the Handoff API

The backward compatibility code for the Handoff API in `CoordinatorBasedSegmentHandoffNotifier` has been removed.
If you are upgrading from a Druid version older than 0.14.0, upgrade to a newer version of Druid before upgrading to Druid 28.0.0.

[#14652](https://github.com/apache/druid/pull/14652)

## 27.0.0

### Upgrade notes

#### Worker input bytes for SQL-based ingestion

The maximum input bytes for each worker for SQL-based ingestion is now 512 MiB (previously 10 GiB).

[#14307](https://github.com/apache/druid/pull/14307)

#### Parameter execution changes for Kafka

When using the built-in `FileConfigProvider` for Kafka, interpolations are now intercepted by the JsonConfigurator instead of being passed down to the Kafka provider. This breaks existing deployments.

For more information, see [KIP-297](https://cwiki.apache.org/confluence/display/KAFKA/KIP-297%3A+Externalizing+Secrets+for+Connect+Configurations).

[#13023](https://github.com/apache/druid/pull/13023)

#### Hadoop 2 deprecated

Many of the important dependent libraries that Druid uses no longer support Hadoop 2. In order for Druid to stay current and have pathways to mitigate security vulnerabilities, the community has decided to deprecate support for Hadoop 2.x releases starting this release. Starting with Druid 28.x, Hadoop 3.x is the only supported Hadoop version.

Consider migrating to SQL-based ingestion or native ingestion if you are using Hadoop 2.x for ingestion today. If migrating to Druid ingestion is not possible, plan to upgrade your Hadoop infrastructure before upgrading to the next Druid release.

#### GroupBy v1 deprecated

GroupBy queries using the v1 legacy engine have been deprecated and will be removed in future releases. Use v2 instead. Note that v2 has been the default GroupBy engine for several releases.

For more information, see [GroupBy queries](https://druid.apache.org/docs/latest/querying/groupbyquery.html).

#### Push-based real-time ingestion deprecated

Support for push-based real-time ingestion has been deprecated. It will be removed in future releases.

#### `cachingCost` segment balancing strategy deprecated

The `cachingCost` strategy has been deprecated and will be removed in future releases. Use an alternate segment balancing strategy instead, such as `cost`.

#### Segment loading config changes

The following segment related configs are now deprecated and will be removed in future releases:

- `maxSegmentsInNodeLoadingQueue`
- `maxSegmentsToMove`
- `replicationThrottleLimit`
- `useRoundRobinSegmentAssignment`
- `replicantLifetime`
- `maxNonPrimaryReplicantsToLoad`
- `decommissioningMaxPercentOfMaxSegmentsToMove`

Use `smartSegmentLoading` mode instead, which calculates values for these variables automatically; see the sketch after this section.

Additionally, the defaults for the following Coordinator dynamic configs have changed:

- `maxSegmentsInNodeLoadingQueue`: 500, previously 100
- `maxSegmentsToMove`: 100, previously 5
- `replicationThrottleLimit`: 500, previously 10

These new defaults can improve performance for most use cases.

[#13197](https://github.com/apache/druid/pull/13197)
[#14269](https://github.com/apache/druid/pull/14269)
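As a sketch, smart segment loading is a Coordinator dynamic config; a payload along these lines (illustrative) posted to the Coordinator dynamic configuration API (`/druid/coordinator/v1/config`) enables it explicitly:

```json
{
  "smartSegmentLoading": true
}
```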
#### `SysMonitor` support deprecated

Switch to `OshiSysMonitor` as `SysMonitor` is now deprecated and will be removed in future releases.

### Incompatible changes

#### Removed property for setting max bytes for dimension lookup cache

`druid.processing.columnCache.sizeBytes` has been removed since it provided limited utility after a number of internal changes. Leaving this config is harmless, but it does nothing.

[#14500](https://github.com/apache/druid/pull/14500)

#### Removed Coordinator dynamic configs

The following Coordinator dynamic configs have been removed:

- `emitBalancingStats`: Stats for errors encountered while balancing will always be emitted. Other debugging stats will not be emitted but can be logged by setting the appropriate `debugDimensions`.
- `useBatchedSegmentSampler` and `percentOfSegmentsToConsiderPerMove`: Batched segment sampling is now the standard and will always be on.

Use the new [smart segment loading](https://druid.apache.org/docs/latest/configuration/#smart-segment-loading) mode instead.

[#14524](https://github.com/apache/druid/pull/14524)

## 26.0.0

### Upgrade notes

#### Real-time tasks

Optimized query performance by lowering the default `maxRowsInMemory` for real-time ingestion, which might lower overall ingestion throughput.

[#13939](https://github.com/apache/druid/pull/13939)

### Incompatible changes

#### Firehose ingestion removed

The firehose/parser specification used by legacy Druid streaming formats is removed.
Firehose ingestion was deprecated in version 0.17, and support for this ingestion was removed in version 24.0.0.

[#12852](https://github.com/apache/druid/pull/12852)

#### Information schema now uses numeric column types

The Druid system table (`INFORMATION_SCHEMA`) now uses SQL types instead of Druid types for columns. This change makes the `INFORMATION_SCHEMA` table behave more like standard SQL. You may need to update your queries in order to avoid unexpected results if you depend on either of the following:

- Numeric fields being treated as strings.
- Column numbering starting at 0. Column numbering is now 1-based.

[#13777](https://github.com/apache/druid/pull/13777)

#### `frontCoded` segment format change

The `frontCoded` type of `stringEncodingStrategy` on `indexSpec` now uses a new segment format version, which typically has faster read speeds and reduced segment size. This improvement is backwards incompatible with Druid 25.0.0.
## 25.0.0

### Upgrade notes

#### Default HTTP-based segment discovery and task management

The default segment discovery method now uses HTTP instead of ZooKeeper.

This update changes the defaults for the following properties:

| Property | New default | Previous default |
|----------|-------------|------------------|
| `druid.serverview.type` for segment management | http | batch |
| `druid.coordinator.loadqueuepeon.type` for segment management | http | curator |
| `druid.indexer.runner.type` for the Overlord | httpRemote | local |

To use ZooKeeper instead of HTTP, change the values for the properties back to the previous defaults. ZooKeeper-based implementations for these properties are deprecated and will be removed in a subsequent release.

[#13092](https://github.com/apache/druid/pull/13092)
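For example, reverting to the ZooKeeper-based behavior amounts to setting the previous defaults from the table above in the runtime properties:

```
druid.serverview.type=batch
druid.coordinator.loadqueuepeon.type=curator
druid.indexer.runner.type=local
```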
#### Finalizing HLL and quantiles sketch aggregates

Previously, the aggregation functions for HLL and quantiles sketches returned either sketches or numbers when finalized, depending on where they were in the native query plan.

Druid no longer finalizes aggregators in the following two cases:

- aggregators appear in the outer level of a query
- aggregators are used as input to an expression or finalizing-field-access post-aggregator

This change aligns the behavior of HLL and quantiles sketches with theta sketches.

To restore the old behavior, you can set `sqlFinalizeOuterSketches=true` in the query context.

[#13247](https://github.com/apache/druid/pull/13247)
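For example, a query context sketch that restores the old finalization behavior:

```json
"context": {
  "sqlFinalizeOuterSketches": true
}
```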
#### Kill tasks mark segments as unused only if specified

When you issue a kill task, Druid marks the underlying segments as unused only if explicitly specified. For more information, see the [API reference](https://druid.apache.org/docs/latest/api-reference/data-management-api).

[#13104](https://github.com/apache/druid/pull/13104)
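As a sketch, a kill task spec that opts in to marking segments unused; the `markAsUnused` flag name and the datasource and interval values here are assumptions for illustration:

```json
{
  "type": "kill",
  "dataSource": "wikipedia",
  "interval": "2022-01-01/2023-01-01",
  "markAsUnused": true
}
```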
### Incompatible changes

#### Upgrade curator to 5.3.0

Apache Curator has been upgraded to the latest version, 5.3.0. This version drops support for ZooKeeper 3.4, but Druid already officially dropped that support in 0.22. In 5.3.0, Curator has removed support for Exhibitor, so all related configurations and tests have been removed.

[#12939](https://github.com/apache/druid/pull/12939)

#### Fixed Parquet list conversion

The behavior of the Parquet reader for lists of structured objects has been changed to be consistent with other Parquet logical list conversions. The data is now fetched directly, more closely matching its expected structure.

[#13294](https://github.com/apache/druid/pull/13294)

## 24.0.0

### Upgrade notes

#### Permissions for multi-stage query engine

To read external data using the multi-stage query task engine, you must have READ permissions for the EXTERNAL resource type. Users without the correct permission encounter a 403 error when trying to run SQL queries that include EXTERN.

The way you assign the permission depends on your authorizer. For example, with [basic security](https://github.com/apache/druid/blob/druid-24.0.0/docs/operations/security-user-auth.md) in Druid, add the EXTERNAL READ permission by sending a POST request to the [roles API](https://github.com/apache/druid/blob/druid-24.0.0/docs/development/extensions-core/druid-basic-security.md#permissions).

The example adds permissions for users with the admin role using a basic authorizer named MyBasicMetadataAuthorizer. The following permissions are granted:

- DATASOURCE READ
- DATASOURCE WRITE
- CONFIG READ
- CONFIG WRITE
- STATE READ
- STATE WRITE
- EXTERNAL READ

```
curl --location --request POST 'http://localhost:8081/druid-ext/basic-security/authorization/db/MyBasicMetadataAuthorizer/roles/admin/permissions' \
--header 'Content-Type: application/json' \
--data-raw '[
  {
    "resource": { "name": ".*", "type": "DATASOURCE" },
    "action": "READ"
  },
  {
    "resource": { "name": ".*", "type": "DATASOURCE" },
    "action": "WRITE"
  },
  {
    "resource": { "name": ".*", "type": "CONFIG" },
    "action": "READ"
  },
  {
    "resource": { "name": ".*", "type": "CONFIG" },
    "action": "WRITE"
  },
  {
    "resource": { "name": ".*", "type": "STATE" },
    "action": "READ"
  },
  {
    "resource": { "name": ".*", "type": "STATE" },
    "action": "WRITE"
  },
  {
    "resource": { "name": "EXTERNAL", "type": "EXTERNAL" },
    "action": "READ"
  }
]'
```

#### Behavior for unused segments

Druid automatically retains any segments marked as unused. Previously, Druid permanently deleted unused segments from the metadata store and deep storage after their retention duration passed. This behavior was reverted from 0.23.0.

[#12693](https://github.com/apache/druid/pull/12693)

#### Default for `druid.processing.fifo`

The default for `druid.processing.fifo` is now true. This means that tasks of equal priority are treated in a FIFO manner. For most use cases, this change can improve performance on heavily loaded clusters.

[#12571](https://github.com/apache/druid/pull/12571)

#### Update to JDBC statement closure

In previous releases, Druid automatically closed the JDBC Statement when the ResultSet was closed. Druid closed the ResultSet on EOF. Druid closed the statement on any exception. This behavior is, however, non-standard.

In this release, Druid's JDBC driver follows the JDBC standards more closely:

- The ResultSet closes automatically on EOF, but does not close the Statement or PreparedStatement. Your code must close these statements, perhaps by using a try-with-resources block.
- The PreparedStatement can now be used multiple times with different parameters. (Previously this was not true since closing the ResultSet closed the PreparedStatement.)
- If any call to a Statement or PreparedStatement raises an error, the client code must still explicitly close the statement. According to the JDBC standards, statements are not closed automatically on errors. This allows you to obtain information about a failed statement before closing it.

If you have code that depended on the old behavior, you may have to change your code to add the required close statement.

[#12709](https://github.com/apache/druid/pull/12709)
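A minimal Java sketch of the try-with-resources pattern described above; the Broker URL, datasource, and query are illustrative, and the Avatica JDBC driver is assumed to be on the classpath:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class DruidJdbcExample
{
  public static void main(String[] args) throws Exception
  {
    // Illustrative Avatica connection URL for a local Broker.
    String url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica/";

    try (Connection connection = DriverManager.getConnection(url);
         PreparedStatement statement =
             connection.prepareStatement("SELECT COUNT(*) FROM wikipedia WHERE page = ?")) {
      statement.setString(1, "Druid");
      // The ResultSet closes automatically on EOF, but no longer closes the statement.
      try (ResultSet resultSet = statement.executeQuery()) {
        while (resultSet.next()) {
          System.out.println(resultSet.getLong(1));
        }
      }
      // The PreparedStatement can now be reused with different parameters.
      statement.setString(1, "Apache");
      try (ResultSet resultSet = statement.executeQuery()) {
        while (resultSet.next()) {
          System.out.println(resultSet.getLong(1));
        }
      }
    } // try-with-resources closes the statement and connection, even on error.
  }
}
```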
(0,i.kt)("p",null,"Keeping statements open after an error allows you to obtain information about the failed statement before closing it. If you have code that depended on the old behavior, you may need to change it to add the required close calls."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/12709"},"#12709")),(0,i.kt)("h2",{id:"0230"},"0.23.0"),(0,i.kt)("h3",{id:"upgrade-notes-6"},"Upgrade notes"),(0,i.kt)("h4",{id:"auto-killing-of-segments"},"Auto-killing of segments"),(0,i.kt)("p",null,"In 0.23.0, auto-killing of segments is enabled by default ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/12187"},"(#12187)"),". The new defaults kill all unused segments older than 90 days. If you do not want this behavior on upgrade, explicitly disable it. This is a risky change: depending on the interval, segments may be killed immediately after being marked unused. This behavior will be reverted or changed in the next Druid release. See ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/12693"},"(#12693)")," for more details."),(0,i.kt)("h4",{id:"other-changes"},"Other changes"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},"Kinesis ingestion requires ",(0,i.kt)("inlineCode",{parentName:"li"},"listShards")," API access on the stream."),(0,i.kt)("li",{parentName:"ul"},"Kafka client libraries have been upgraded to 3.0.0 ",(0,i.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/11735"},"(#11735)"),"."),(0,i.kt)("li",{parentName:"ul"},"The dynamic coordinator config ",(0,i.kt)("inlineCode",{parentName:"li"},"percentOfSegmentsToConsiderPerMove")," has been deprecated and will be removed in a future release of Druid. It is being replaced by a new segment picking strategy introduced in ",(0,i.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/11257"},"(#11257)"),". This new strategy is toggled off by default, but can be toggled on by setting the dynamic coordinator config ",(0,i.kt)("inlineCode",{parentName:"li"},"useBatchedSegmentSampler")," to true. Doing so disables the use of the deprecated ",(0,i.kt)("inlineCode",{parentName:"li"},"percentOfSegmentsToConsiderPerMove"),". In a future release, ",(0,i.kt)("inlineCode",{parentName:"li"},"useBatchedSegmentSampler")," will become permanently true. ",(0,i.kt)("a",{parentName:"li",href:"https://github.com/apache/druid/pull/11960"},"(#11960)"))),(0,i.kt)("h2",{id:"0220"},"0.22.0"),(0,i.kt)("h3",{id:"upgrade-notes-7"},"Upgrade notes"),(0,i.kt)("h4",{id:"dropped-support-for-apache-zookeeper-34"},"Dropped support for Apache ZooKeeper 3.4"),(0,i.kt)("p",null,"Following up on 0.21, which officially deprecated support for ZooKeeper 3.4, ",(0,i.kt)("a",{parentName:"p",href:"https://lists.apache.org/thread/xckr6nnsg9rxchkbvltkvt7hr2d0mhbo"},"which has been end-of-life for a while"),", support for ZooKeeper 3.4 is now removed in 0.22.0. Be sure to upgrade your ZooKeeper cluster prior to upgrading your Druid cluster to 0.22.0."),
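(0,i.kt)("p",null,"To confirm which version your ensemble is running, you can ask any ZooKeeper node directly; a minimal sketch using the ",(0,i.kt)("inlineCode",{parentName:"p"},"srvr")," four-letter command (the host and port are illustrative):"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre"},'# Host and port are illustrative; point this at a ZooKeeper node.\necho srvr | nc localhost 2181 | grep -i version\n')),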
(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/issues/10780"},"#10780"),"\n",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/11073"},"#11073")),(0,i.kt)("h4",{id:"native-batch-ingestion-segment-allocation-fix"},"Native batch ingestion segment allocation fix"),(0,i.kt)("p",null,"Druid 0.22.0 includes an important bug fix in native batch indexing. Previously, transient failures of indexing sub-tasks could result in non-contiguous partitions in the result segments, which would never become queryable due to logic that checks for the 'complete' set. The fix required a change in the protocol that batch tasks use to allocate segments, and this change can cause issues during rolling downgrades if you decide to roll back from Druid 0.22.0 to an earlier version."),(0,i.kt)("p",null,"To avoid task failure during a rolling downgrade, set"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre"},'druid.indexer.task.default.context={ "useLineageBasedSegmentAllocation" : false }\n')),(0,i.kt)("p",null,"in the Overlord runtime properties, and wait for all tasks that have ",(0,i.kt)("inlineCode",{parentName:"p"},"useLineageBasedSegmentAllocation")," set to true to complete before initiating the downgrade. After these tasks have all completed, the downgrade shouldn't have any further issues, and the setting can be removed from the Overlord configuration (recommended, as you will want this setting enabled if you are running Druid 0.22.0 or newer)."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/11189"},"#11189")),(0,i.kt)("h4",{id:"sql-timeseries-no-longer-skip-empty-buckets-with-all-granularity"},"SQL timeseries no longer skip empty buckets with all granularity"),(0,i.kt)("p",null,"Prior to Druid 0.22.0, a SQL group-by query that uses a single universal grouping key (that is, only aggregators), such as ",(0,i.kt)("inlineCode",{parentName:"p"},"SELECT COUNT(*), SUM(x) FROM y WHERE z = 'someval'"),", would produce an empty result set instead of the ",(0,i.kt)("inlineCode",{parentName:"p"},"[0, null]")," you might expect when the query matches no rows. This was because such a query plans into a timeseries query with 'ALL' granularity and ",(0,i.kt)("inlineCode",{parentName:"p"},"skipEmptyBuckets")," set to true in the query context. With no buckets containing values to aggregate, every bucket was skipped, yielding an empty result set instead of a 'nil' result set. This behavior has been changed to be in line with other SQL implementations, but the previous behavior can be restored by explicitly setting ",(0,i.kt)("inlineCode",{parentName:"p"},"skipEmptyBuckets")," in the query context."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/11188"},"#11188")),(0,i.kt)("h4",{id:"druid-reingestion-incompatible-changes"},"Druid reingestion incompatible changes"),(0,i.kt)("p",null,"Batch tasks using a 'Druid' input source to reingest segment data no longer accept the 'dimensions' and 'metrics' sections of their task spec; they now internally use a new columns filter to specify which columns from the original segment should be retained. Additionally, timestampSpec is no longer ignored, allowing the __time column to be modified or replaced with a different column."),
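(0,i.kt)("p",null,"For example, a reingestion task can now carry a ",(0,i.kt)("inlineCode",{parentName:"p"},"timestampSpec")," that is actually honored. A minimal sketch of the relevant spec fragment (the column name and format are illustrative):"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre"},'"timestampSpec": {\n  "column": "updated_at",\n  "format": "iso"\n}\n')),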
(0,i.kt)("p",null,"These changes additionally fix a bug where transformed columns would be ignored and unavailable on the new segments."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/10267"},"#10267")),(0,i.kt)("h4",{id:"druid-web-console-no-longer-supports-ie11-and-other-older-browsers"},"Druid web console no longer supports IE11 and other older browsers"),(0,i.kt)("p",null,"Some things might still work, but these browsers are no longer officially supported, so that newer JavaScript features can be used to develop the web console."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/11357"},"#11357")),(0,i.kt)("h4",{id:"changed-default-maximum-segment-loading-queue-size"},"Changed default maximum segment loading queue size"),(0,i.kt)("p",null,"The default for the Druid coordinator dynamic configuration ",(0,i.kt)("inlineCode",{parentName:"p"},"maxSegmentsInNodeLoadingQueue")," has been changed from unlimited (",(0,i.kt)("inlineCode",{parentName:"p"},"0"),") to ",(0,i.kt)("inlineCode",{parentName:"p"},"100"),". This should make the coordinator behave in a much more relaxed manner during periods of cluster volatility, such as a rolling upgrade. However, it caps the number of segments loaded in any given coordinator cycle to 100 per server, which can slow down the speed at which a completely stopped cluster is started and loaded from deep storage."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/11540"},"#11540")),(0,i.kt)("h2",{id:"0210"},"0.21.0"),(0,i.kt)("h4",{id:"improved-http-status-codes-for-query-errors"},"Improved HTTP status codes for query errors"),(0,i.kt)("p",null,'Before this release, Druid returned "internal error (500)" for most query errors. Now Druid returns different status codes based on the cause.'),
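(0,i.kt)("p",null,"For example, a query that exceeds its timeout now surfaces as HTTP 504 with a JSON error body along these lines (a hypothetical response; the field values are illustrative):"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre"},'{\n  "error": "Query timeout",\n  "errorMessage": "Query did not complete within the configured timeout.",\n  "errorClass": "org.apache.druid.query.QueryTimeoutException",\n  "host": "broker-0.example.com"\n}\n')),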
(0,i.kt)("p",null,"The following table lists the errors and their corresponding codes that have changed:"),(0,i.kt)("table",null,(0,i.kt)("thead",{parentName:"table"},(0,i.kt)("tr",{parentName:"thead"},(0,i.kt)("th",{parentName:"tr",align:null},"Exception"),(0,i.kt)("th",{parentName:"tr",align:null},"Description"),(0,i.kt)("th",{parentName:"tr",align:null},"Old code"),(0,i.kt)("th",{parentName:"tr",align:null},"New code"))),(0,i.kt)("tbody",{parentName:"table"},(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},"SqlParseException and ValidationException from Calcite"),(0,i.kt)("td",{parentName:"tr",align:null},"Query planning failed"),(0,i.kt)("td",{parentName:"tr",align:null},"500"),(0,i.kt)("td",{parentName:"tr",align:null},"400")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},"QueryTimeoutException"),(0,i.kt)("td",{parentName:"tr",align:null},"Query execution didn't finish within the timeout"),(0,i.kt)("td",{parentName:"tr",align:null},"500"),(0,i.kt)("td",{parentName:"tr",align:null},"504")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},"ResourceLimitExceededException"),(0,i.kt)("td",{parentName:"tr",align:null},"Query required more resources than the configured threshold"),(0,i.kt)("td",{parentName:"tr",align:null},"500"),(0,i.kt)("td",{parentName:"tr",align:null},"400")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},"InsufficientResourceException"),(0,i.kt)("td",{parentName:"tr",align:null},"Query failed to schedule because no merge buffers were available at the time it was submitted"),(0,i.kt)("td",{parentName:"tr",align:null},"500"),(0,i.kt)("td",{parentName:"tr",align:null},"429 (merged into QueryCapacityExceededException)")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},"QueryUnsupportedException"),(0,i.kt)("td",{parentName:"tr",align:null},"Unsupported functionality"),(0,i.kt)("td",{parentName:"tr",align:null},"400"),(0,i.kt)("td",{parentName:"tr",align:null},"501")))),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/10464"},"#10464"),"\n",(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/10746"},"#10746")),(0,i.kt)("h4",{id:"query-interrupted-metric"},"Query interrupted metric"),(0,i.kt)("p",null,(0,i.kt)("inlineCode",{parentName:"p"},"query/interrupted/count")," no longer counts queries that timed out. These queries are now counted by ",(0,i.kt)("inlineCode",{parentName:"p"},"query/timeout/count"),"."),(0,i.kt)("h4",{id:"context-dimension-in-query-metrics"},"context dimension in query metrics"),(0,i.kt)("p",null,(0,i.kt)("inlineCode",{parentName:"p"},"context")," is now a default dimension emitted for all query metrics. ",(0,i.kt)("inlineCode",{parentName:"p"},"context")," is a JSON-formatted string containing the query context for the query that the emitted metric refers to. The addition of a dimension that was not previously present alters some metrics emitted by Druid, so you should plan to handle the new ",(0,i.kt)("inlineCode",{parentName:"p"},"context")," dimension in your metrics pipeline. Since the dimension is a JSON-formatted string, a common solution is to parse it and either flatten it or extract the fields you need, discarding the rest of the blob."),
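(0,i.kt)("p",null,"A minimal sketch of such a pipeline step (assuming Java with Jackson; the extracted field is illustrative):"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre"},'import com.fasterxml.jackson.databind.JsonNode;\nimport com.fasterxml.jackson.databind.ObjectMapper;\n\n// Extracts a single field of interest from the JSON-formatted "context"\n// dimension of an emitted query metric and discards the rest.\npublic class ContextDimensionFlattener {\n  private static final ObjectMapper MAPPER = new ObjectMapper();\n\n  public static String extractQueryId(String contextJson) throws Exception {\n    JsonNode context = MAPPER.readTree(contextJson);\n    return context.path("queryId").asText(null);\n  }\n}\n')),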
(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/10578"},"#10578")),(0,i.kt)("h4",{id:"deprecated-support-for-apache-zookeeper-34"},"Deprecated support for Apache ZooKeeper 3.4"),(0,i.kt)("p",null,"As ",(0,i.kt)("a",{parentName:"p",href:"https://mail-archives.apache.org/mod_mbox/zookeeper-user/202004.mbox/%3C41A7EC67-D8F4-4C3A-B2DB-C2741C2EECA3%40apache.org%3E"},"ZooKeeper 3.4 has been end-of-life for a while"),", support for ZooKeeper 3.4 is deprecated in 0.21.0 and will be removed in the near future."),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/issues/10780"},"#10780")),(0,i.kt)("h4",{id:"consistent-serialization-format-and-column-naming-convention-for-the-syssegments-table"},"Consistent serialization format and column naming convention for the sys.segments table"),(0,i.kt)("p",null,"All columns in the ",(0,i.kt)("inlineCode",{parentName:"p"},"sys.segments"),' table are now serialized in JSON format to make them consistent with other system tables. Column names now use the same "snake case" convention.'),(0,i.kt)("p",null,(0,i.kt)("a",{parentName:"p",href:"https://github.com/apache/druid/pull/10481"},"#10481")))}h.isMDXComponent=!0}}]);