blob: 9b17c8895d9f223147408eea3f89ea0cfd126f23 [file] [log] [blame]
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6677],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>u});var a=n(67294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function o(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?r(Object(n),!0).forEach((function(t){i(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):r(Object(n)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}function s(e,t){if(null==e)return{};var n,a,i=function(e,t){if(null==e)return{};var n,a,i={},r=Object.keys(e);for(a=0;a<r.length;a++)n=r[a],t.indexOf(n)>=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a<r.length;a++)n=r[a],t.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var l=a.createContext({}),d=function(e){var t=a.useContext(l),n=t;return e&&(n="function"==typeof e?e(t):o(o({},t),e)),n},p=function(e){var t=d(e.components);return a.createElement(l.Provider,{value:t},e.children)},k="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},c=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),k=d(n),c=i,u=k["".concat(l,".").concat(c)]||k[c]||m[c]||r;return n?a.createElement(u,o(o({ref:t},p),{},{components:n})):a.createElement(u,o({ref:t},p))}));function u(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,o=new Array(r);o[0]=c;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[k]="string"==typeof e?e:i,o[1]=s;for(var d=2;d<r;d++)o[d]=n[d];return a.createElement.apply(null,o)}return a.createElement.apply(null,n)}c.displayName="MDXCreateElement"},36669:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>l,default:()=>u,frontMatter:()=>s,metadata:()=>d,toc:()=>k});var a=n(87462),i=n(63366),r=(n(67294),n(3905)),o=["components"],s={id:"tasks",title:"Task reference",sidebar_label:"Task reference"},l=void 0,d={unversionedId:"ingestion/tasks",id:"ingestion/tasks",title:"Task reference",description:"\x3c!--",source:"@site/docs/28.0.1/ingestion/tasks.md",sourceDirName:"ingestion",slug:"/ingestion/tasks",permalink:"/docs/28.0.1/ingestion/tasks",draft:!1,tags:[],version:"current",frontMatter:{id:"tasks",title:"Task reference",sidebar_label:"Task reference"},sidebar:"docs",previous:{title:"Partitioning",permalink:"/docs/28.0.1/ingestion/partitioning"},next:{title:"SQL-based ingestion",permalink:"/docs/28.0.1/multi-stage-query/"}},p={},k=[{value:"Task API",id:"task-api",level:2},{value:"Task reports",id:"task-reports",level:2},{value:"Completion report",id:"completion-report",level:3},{value:"Segment Availability Fields",id:"segment-availability-fields",level:4},{value:"Live report",id:"live-report",level:3},{value:"Live reports",id:"live-reports",level:2},{value:"Row stats",id:"row-stats",level:3},{value:"Unparseable events",id:"unparseable-events",level:3},{value:"Task lock system",id:"task-lock-system",level:2},{value:"&quot;Overshadowing&quot; between segments",id:"overshadowing-between-segments",level:3},{value:"Locking",id:"locking",level:3},{value:"Lock priority",id:"lock-priority",level:3},{value:"Task actions",id:"task-actions",level:2},{value:"Batching <code>segmentAllocate</code> actions",id:"batching-segmentallocate-actions",level:3},{value:"Context parameters",id:"context-parameters",level:2},{value:"Task logs",id:"task-logs",level:2},{value:"Configuring task storage sizes",id:"configuring-task-storage-sizes",level:2},{value:"All task types",id:"all-task-types",level:2},{value:"<code>index_parallel</code>",id:"index_parallel",level:3},{value:"<code>index_hadoop</code>",id:"index_hadoop",level:3},{value:"<code>index_kafka</code>",id:"index_kafka",level:3},{value:"<code>index_kinesis</code>",id:"index_kinesis",level:3},{value:"<code>compact</code>",id:"compact",level:3},{value:"<code>kill</code>",id:"kill",level:3}],m={toc:k},c="wrapper";function u(e){var t=e.components,n=(0,i.Z)(e,o);return(0,r.kt)(c,(0,a.Z)({},m,n,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"Tasks do all ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/"},"ingestion"),"-related work in Druid."),(0,r.kt)("p",null,"For batch ingestion, you will generally submit tasks directly to Druid using the\n",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/api-reference/tasks-api"},"Tasks APIs"),". For streaming ingestion, tasks are generally submitted for you by a\nsupervisor."),(0,r.kt)("h2",{id:"task-api"},"Task API"),(0,r.kt)("p",null,"Task APIs are available in two main places:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"The ",(0,r.kt)("a",{parentName:"li",href:"/docs/28.0.1/design/overlord"},"Overlord")," process offers HTTP APIs to submit tasks, cancel tasks, check their status,\nreview logs and reports, and more. Refer to the ",(0,r.kt)("a",{parentName:"li",href:"/docs/28.0.1/api-reference/tasks-api"},"Tasks API reference")," for a\nfull list."),(0,r.kt)("li",{parentName:"ul"},"Druid SQL includes a ",(0,r.kt)("a",{parentName:"li",href:"/docs/28.0.1/querying/sql-metadata-tables#tasks-table"},(0,r.kt)("inlineCode",{parentName:"a"},"sys.tasks"))," table that provides information about currently\nrunning tasks. This table is read-only, and has a limited (but useful!) subset of the full information available through\nthe Overlord APIs.")),(0,r.kt)("a",{name:"reports"}),(0,r.kt)("h2",{id:"task-reports"},"Task reports"),(0,r.kt)("p",null,"A report containing information about the number of rows ingested, and any parse exceptions that occurred is available for both completed tasks and running tasks."),(0,r.kt)("p",null,"The reporting feature is supported by ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/native-batch"},"native batch tasks"),", the Hadoop batch task, and Kafka and Kinesis ingestion tasks."),(0,r.kt)("h3",{id:"completion-report"},"Completion report"),(0,r.kt)("p",null,"After a task completes, if it supports reports, its report can be retrieved at:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"http://<OVERLORD-HOST>:<OVERLORD-PORT>/druid/indexer/v1/task/<task-id>/reports\n")),(0,r.kt)("p",null,"An example output is shown below:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'{\n "ingestionStatsAndErrors": {\n "taskId": "compact_twitter_2018-09-24T18:24:23.920Z",\n "payload": {\n "ingestionState": "COMPLETED",\n "unparseableEvents": {},\n "rowStats": {\n "determinePartitions": {\n "processed": 0,\n "processedBytes": 0,\n "processedWithError": 0,\n "thrownAway": 0,\n "unparseable": 0\n },\n "buildSegments": {\n "processed": 5390324,\n "processedBytes": 5109573212,\n "processedWithError": 0,\n "thrownAway": 0,\n "unparseable": 0\n }\n },\n "segmentAvailabilityConfirmed": false,\n "segmentAvailabilityWaitTimeMs": 0,\n "errorMsg": null\n },\n "type": "ingestionStatsAndErrors"\n }\n}\n')),(0,r.kt)("h4",{id:"segment-availability-fields"},"Segment Availability Fields"),(0,r.kt)("p",null,"For some task types, the indexing task can wait for the newly ingested segments to become available for queries after ingestion completes. The below fields inform the end user regarding the duration and result of the availability wait. For batch ingestion task types, refer to ",(0,r.kt)("inlineCode",{parentName:"p"},"tuningConfig")," docs to see if the task supports an availability waiting period."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"Field"),(0,r.kt)("th",{parentName:"tr",align:null},"Description"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"segmentAvailabilityConfirmed")),(0,r.kt)("td",{parentName:"tr",align:null},"Whether all segments generated by this ingestion task had been confirmed as available for queries in the cluster before the task completed.")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"segmentAvailabilityWaitTimeMs")),(0,r.kt)("td",{parentName:"tr",align:null},"Milliseconds waited by the ingestion task for the newly ingested segments to be available for query after completing ingestion was completed.")))),(0,r.kt)("h3",{id:"live-report"},"Live report"),(0,r.kt)("p",null,"When a task is running, a live report containing ingestion state, unparseable events and moving average for number of events processed for 1 min, 5 min, 15 min time window can be retrieved at:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"http://<OVERLORD-HOST>:<OVERLORD-PORT>/druid/indexer/v1/task/<task-id>/reports\n")),(0,r.kt)("p",null,"An example output is shown below:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'{\n "ingestionStatsAndErrors": {\n "taskId": "compact_twitter_2018-09-24T18:24:23.920Z",\n "payload": {\n "ingestionState": "RUNNING",\n "unparseableEvents": {},\n "rowStats": {\n "movingAverages": {\n "buildSegments": {\n "5m": {\n "processed": 3.392158326408501,\n "processedBytes": 627.5492903856,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n },\n "15m": {\n "processed": 1.736165476881023,\n "processedBytes": 321.1906130223,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n },\n "1m": {\n "processed": 4.206417693750045,\n "processedBytes": 778.1872733438,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n }\n }\n },\n "totals": {\n "buildSegments": {\n "processed": 1994,\n "processedBytes": 3425110,\n "processedWithError": 0,\n "thrownAway": 0,\n "unparseable": 0\n }\n }\n },\n "errorMsg": null\n },\n "type": "ingestionStatsAndErrors"\n }\n}\n')),(0,r.kt)("p",null,"A description of the fields:"),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"ingestionStatsAndErrors")," report provides information about row counts and errors."),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"ingestionState")," shows what step of ingestion the task reached. Possible states include:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"NOT_STARTED"),": The task has not begun reading any rows"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"DETERMINE_PARTITIONS"),": The task is processing rows to determine partitioning"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"BUILD_SEGMENTS"),": The task is processing rows to construct segments"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"COMPLETED"),": The task has finished its work.")),(0,r.kt)("p",null,"Only batch tasks have the DETERMINE_PARTITIONS phase. Realtime tasks such as those created by the Kafka Indexing Service do not have a DETERMINE_PARTITIONS phase."),(0,r.kt)("p",null,(0,r.kt)("inlineCode",{parentName:"p"},"unparseableEvents")," contains lists of exception messages that were caused by unparseable inputs. This can help with identifying problematic input rows. There will be one list each for the DETERMINE_PARTITIONS and BUILD_SEGMENTS phases. Note that the Hadoop batch task does not support saving of unparseable events."),(0,r.kt)("p",null,"the ",(0,r.kt)("inlineCode",{parentName:"p"},"rowStats")," map contains information about row counts. There is one entry for each ingestion phase. The definitions of the different row counts are shown below:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"processed"),": Number of rows successfully ingested without parsing errors"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"processedBytes"),": Total number of uncompressed bytes processed by the task. This reports the total byte size of all rows i.e. even those that are included in ",(0,r.kt)("inlineCode",{parentName:"li"},"processedWithError"),", ",(0,r.kt)("inlineCode",{parentName:"li"},"unparseable")," or ",(0,r.kt)("inlineCode",{parentName:"li"},"thrownAway"),"."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"processedWithError"),": Number of rows that were ingested, but contained a parsing error within one or more columns. This typically occurs where input rows have a parseable structure but invalid types for columns, such as passing in a non-numeric String value for a numeric column."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"thrownAway"),": Number of rows skipped. This includes rows with timestamps that were outside of the ingestion task's defined time interval and rows that were filtered out with a ",(0,r.kt)("a",{parentName:"li",href:"/docs/28.0.1/ingestion/ingestion-spec#transformspec"},(0,r.kt)("inlineCode",{parentName:"a"},"transformSpec")),", but doesn't include the rows skipped by explicit user configurations. For example, the rows skipped by ",(0,r.kt)("inlineCode",{parentName:"li"},"skipHeaderRows")," or ",(0,r.kt)("inlineCode",{parentName:"li"},"hasHeaderRow")," in the CSV format are not counted."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"unparseable"),": Number of rows that could not be parsed at all and were discarded. This tracks input rows without a parseable structure, such as passing in non-JSON data when using a JSON parser.")),(0,r.kt)("p",null,"The ",(0,r.kt)("inlineCode",{parentName:"p"},"errorMsg")," field shows a message describing the error that caused a task to fail. It will be null if the task was successful."),(0,r.kt)("h2",{id:"live-reports"},"Live reports"),(0,r.kt)("h3",{id:"row-stats"},"Row stats"),(0,r.kt)("p",null,"The ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/native-batch"},"native batch task"),", the Hadoop batch task, and Kafka and Kinesis ingestion tasks support retrieval of row stats while the task is running."),(0,r.kt)("p",null,"The live report can be accessed with a GET to the following URL on a Peon running a task:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"http://<middlemanager-host>:<worker-port>/druid/worker/v1/chat/<task-id>/rowStats\n")),(0,r.kt)("p",null,"An example report is shown below. The ",(0,r.kt)("inlineCode",{parentName:"p"},"movingAverages")," section contains 1 minute, 5 minute, and 15 minute moving averages of increases to the four row counters, which have the same definitions as those in the completion report. The ",(0,r.kt)("inlineCode",{parentName:"p"},"totals")," section shows the current totals."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'{\n "movingAverages": {\n "buildSegments": {\n "5m": {\n "processed": 3.392158326408501,\n "processedBytes": 627.5492903856,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n },\n "15m": {\n "processed": 1.736165476881023,\n "processedBytes": 321.1906130223,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n },\n "1m": {\n "processed": 4.206417693750045,\n "processedBytes": 778.1872733438,\n "unparseable": 0,\n "thrownAway": 0,\n "processedWithError": 0\n }\n }\n },\n "totals": {\n "buildSegments": {\n "processed": 1994,\n "processedBytes": 3425110,\n "processedWithError": 0,\n "thrownAway": 0,\n "unparseable": 0\n }\n }\n}\n')),(0,r.kt)("p",null,"For the Kafka Indexing Service, a GET to the following Overlord API will retrieve live row stat reports from each task being managed by the supervisor and provide a combined report."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"http://<OVERLORD-HOST>:<OVERLORD-PORT>/druid/indexer/v1/supervisor/<supervisor-id>/stats\n")),(0,r.kt)("h3",{id:"unparseable-events"},"Unparseable events"),(0,r.kt)("p",null,"Lists of recently-encountered unparseable events can be retrieved from a running task with a GET to the following Peon API:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"http://<middlemanager-host>:<worker-port>/druid/worker/v1/chat/<task-id>/unparseableEvents\n")),(0,r.kt)("p",null,"Note that this functionality is not supported by all task types. Currently, it is only supported by the\nnon-parallel ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/native-batch"},"native batch task")," (type ",(0,r.kt)("inlineCode",{parentName:"p"},"index"),") and the tasks created by the Kafka\nand Kinesis indexing services."),(0,r.kt)("a",{name:"locks"}),(0,r.kt)("h2",{id:"task-lock-system"},"Task lock system"),(0,r.kt)("p",null,"This section explains the task locking system in Druid. Druid's locking system\nand versioning system are tightly coupled with each other to guarantee the correctness of ingested data."),(0,r.kt)("h3",{id:"overshadowing-between-segments"},'"Overshadowing" between segments'),(0,r.kt)("p",null,"You can run a task to overwrite existing data. The segments created by an overwriting task ",(0,r.kt)("em",{parentName:"p"},"overshadows")," existing segments.\nNote that the overshadow relation holds only for the same time chunk and the same data source.\nThese overshadowed segments are not considered in query processing to filter out stale data."),(0,r.kt)("p",null,"Each segment has a ",(0,r.kt)("em",{parentName:"p"},"major")," version and a ",(0,r.kt)("em",{parentName:"p"},"minor")," version. The major version is\nrepresented as a timestamp in the format of ",(0,r.kt)("a",{parentName:"p",href:"https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat"},(0,r.kt)("inlineCode",{parentName:"a"},"\"yyyy-MM-dd'T'hh:mm:ss\"")),"\nwhile the minor version is an integer number. These major and minor versions\nare used to determine the overshadow relation between segments as seen below."),(0,r.kt)("p",null,"A segment ",(0,r.kt)("inlineCode",{parentName:"p"},"s1")," overshadows another ",(0,r.kt)("inlineCode",{parentName:"p"},"s2")," if"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"s1")," has a higher major version than ",(0,r.kt)("inlineCode",{parentName:"li"},"s2"),", or"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"s1")," has the same major version and a higher minor version than ",(0,r.kt)("inlineCode",{parentName:"li"},"s2"),".")),(0,r.kt)("p",null,"Here are some examples."),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"A segment of the major version of ",(0,r.kt)("inlineCode",{parentName:"li"},"2019-01-01T00:00:00.000Z")," and the minor version of ",(0,r.kt)("inlineCode",{parentName:"li"},"0")," overshadows\nanother of the major version of ",(0,r.kt)("inlineCode",{parentName:"li"},"2018-01-01T00:00:00.000Z")," and the minor version of ",(0,r.kt)("inlineCode",{parentName:"li"},"1"),"."),(0,r.kt)("li",{parentName:"ul"},"A segment of the major version of ",(0,r.kt)("inlineCode",{parentName:"li"},"2019-01-01T00:00:00.000Z")," and the minor version of ",(0,r.kt)("inlineCode",{parentName:"li"},"1")," overshadows\nanother of the major version of ",(0,r.kt)("inlineCode",{parentName:"li"},"2019-01-01T00:00:00.000Z")," and the minor version of ",(0,r.kt)("inlineCode",{parentName:"li"},"0"),".")),(0,r.kt)("h3",{id:"locking"},"Locking"),(0,r.kt)("p",null,"If you are running two or more ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/tasks"},"Druid tasks")," which generate segments for the same data source and the same time chunk,\nthe generated segments could potentially overshadow each other, which could lead to incorrect query results."),(0,r.kt)("p",null,"To avoid this problem, tasks will attempt to get locks prior to creating any segment in Druid.\nThere are two types of locks, i.e., ",(0,r.kt)("em",{parentName:"p"},"time chunk lock")," and ",(0,r.kt)("em",{parentName:"p"},"segment lock"),"."),(0,r.kt)("p",null,"When the time chunk lock is used, a task locks the entire time chunk of a data source where generated segments will be written.\nFor example, suppose we have a task ingesting data into the time chunk of ",(0,r.kt)("inlineCode",{parentName:"p"},"2019-01-01T00:00:00.000Z/2019-01-02T00:00:00.000Z")," of the ",(0,r.kt)("inlineCode",{parentName:"p"},"wikipedia")," data source.\nWith the time chunk locking, this task will lock the entire time chunk of ",(0,r.kt)("inlineCode",{parentName:"p"},"2019-01-01T00:00:00.000Z/2019-01-02T00:00:00.000Z")," of the ",(0,r.kt)("inlineCode",{parentName:"p"},"wikipedia")," data source\nbefore it creates any segments. As long as it holds the lock, any other tasks will be unable to create segments for the same time chunk of the same data source.\nThe segments created with the time chunk locking have a ",(0,r.kt)("em",{parentName:"p"},"higher")," major version than existing segments. Their minor version is always ",(0,r.kt)("inlineCode",{parentName:"p"},"0"),"."),(0,r.kt)("p",null,"When the segment lock is used, a task locks individual segments instead of the entire time chunk.\nAs a result, two or more tasks can create segments for the same time chunk of the same data source simultaneously\nif they are reading different segments.\nFor example, a Kafka indexing task and a compaction task can always write segments into the same time chunk of the same data source simultaneously.\nThe reason for this is because a Kafka indexing task always appends new segments, while a compaction task always overwrites existing segments.\nThe segments created with the segment locking have the ",(0,r.kt)("em",{parentName:"p"},"same")," major version and a ",(0,r.kt)("em",{parentName:"p"},"higher")," minor version."),(0,r.kt)("admonition",{type:"info"},(0,r.kt)("p",{parentName:"admonition"}," The segment locking is still experimental. It could have unknown bugs which potentially lead to incorrect query results.")),(0,r.kt)("p",null,"To enable segment locking, you may need to set ",(0,r.kt)("inlineCode",{parentName:"p"},"forceTimeChunkLock")," to ",(0,r.kt)("inlineCode",{parentName:"p"},"false")," in the ",(0,r.kt)("a",{parentName:"p",href:"#context"},"task context"),".\nOnce ",(0,r.kt)("inlineCode",{parentName:"p"},"forceTimeChunkLock")," is unset, the task will choose a proper lock type to use automatically.\nPlease note that segment lock is not always available. The most common use case where time chunk lock is enforced is\nwhen an overwriting task changes the segment granularity.\nAlso, the segment locking is supported by only native indexing tasks and Kafka/Kinesis indexing tasks.\nHadoop indexing tasks don't support it."),(0,r.kt)("p",null,(0,r.kt)("inlineCode",{parentName:"p"},"forceTimeChunkLock")," in the task context is only applied to individual tasks.\nIf you want to unset it for all tasks, you would want to set ",(0,r.kt)("inlineCode",{parentName:"p"},"druid.indexer.tasklock.forceTimeChunkLock")," to false in the ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/configuration/#overlord-operations"},"overlord configuration"),"."),(0,r.kt)("p",null,"Lock requests can conflict with each other if two or more tasks try to get locks for the overlapped time chunks of the same data source.\nNote that the lock conflict can happen between different locks types."),(0,r.kt)("p",null,"The behavior on lock conflicts depends on the ",(0,r.kt)("a",{parentName:"p",href:"#lock-priority"},"task priority"),".\nIf all tasks of conflicting lock requests have the same priority, then the task who requested first will get the lock.\nOther tasks will wait for the task to release the lock."),(0,r.kt)("p",null,"If a task of a lower priority asks a lock later than another of a higher priority,\nthis task will also wait for the task of a higher priority to release the lock.\nIf a task of a higher priority asks a lock later than another of a lower priority,\nthen this task will ",(0,r.kt)("em",{parentName:"p"},"preempt")," the other task of a lower priority. The lock\nof the lower-prioritized task will be revoked and the higher-prioritized task will acquire a new lock."),(0,r.kt)("p",null,"This lock preemption can happen at any time while a task is running except\nwhen it is ",(0,r.kt)("em",{parentName:"p"},"publishing segments")," in a critical section. Its locks become preemptible again once publishing segments is finished."),(0,r.kt)("p",null,"Note that locks are shared by the tasks of the same groupId.\nFor example, Kafka indexing tasks of the same supervisor have the same groupId and share all locks with each other."),(0,r.kt)("a",{name:"priority"}),(0,r.kt)("h3",{id:"lock-priority"},"Lock priority"),(0,r.kt)("p",null,"Each task type has a different default lock priority. The below table shows the default priorities of different task types. Higher the number, higher the priority."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"task type"),(0,r.kt)("th",{parentName:"tr",align:null},"default priority"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Realtime index task"),(0,r.kt)("td",{parentName:"tr",align:null},"75")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Batch index tasks, including ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/ingestion/native-batch"},"native batch"),", ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/multi-stage-query/"},"SQL"),", and ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/ingestion/hadoop"},"Hadoop-based")),(0,r.kt)("td",{parentName:"tr",align:null},"50")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Merge/Append/Compaction task"),(0,r.kt)("td",{parentName:"tr",align:null},"25")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Other tasks"),(0,r.kt)("td",{parentName:"tr",align:null},"0")))),(0,r.kt)("p",null,"You can override the task priority by setting your priority in the task context as below."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'"context" : {\n "priority" : 100\n}\n')),(0,r.kt)("a",{name:"actions"}),(0,r.kt)("h2",{id:"task-actions"},"Task actions"),(0,r.kt)("p",null,"Task actions are overlord actions performed by tasks during their lifecycle. Some typical task actions are:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"lockAcquire"),": acquires a time-chunk lock on an interval for the task"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"lockRelease"),": releases a lock acquired by the task on an interval"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"segmentTransactionalInsert"),": publishes new segments created by a task and optionally overwrites and/or drops existing segments in a single transaction"),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("inlineCode",{parentName:"li"},"segmentAllocate"),": allocates pending segments to a task to write rows")),(0,r.kt)("h3",{id:"batching-segmentallocate-actions"},"Batching ",(0,r.kt)("inlineCode",{parentName:"h3"},"segmentAllocate")," actions"),(0,r.kt)("p",null,"In a cluster with several concurrent tasks, ",(0,r.kt)("inlineCode",{parentName:"p"},"segmentAllocate")," actions on the overlord can take a long time to finish, causing spikes in the ",(0,r.kt)("inlineCode",{parentName:"p"},"task/action/run/time"),". This can result in ingestion lag building up while a task waits for a segment to be allocated.\nThe root cause of such spikes is likely to be one or more of the following:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"several concurrent tasks trying to allocate segments for the same datasource and interval"),(0,r.kt)("li",{parentName:"ul"},"large number of metadata calls made to the segments and pending segments tables "),(0,r.kt)("li",{parentName:"ul"},"concurrency limitations while acquiring a task lock required for allocating a segment")),(0,r.kt)("p",null,"Since the contention typically arises from tasks allocating segments for the same datasource and interval, you can improve the run times by batching the actions together.\nTo enable batched segment allocation on the overlord, set ",(0,r.kt)("inlineCode",{parentName:"p"},"druid.indexer.tasklock.batchSegmentAllocation")," to ",(0,r.kt)("inlineCode",{parentName:"p"},"true"),". See ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/configuration/#overlord-operations"},"overlord configuration")," for more details."),(0,r.kt)("a",{name:"context"}),(0,r.kt)("h2",{id:"context-parameters"},"Context parameters"),(0,r.kt)("p",null,"The task context is used for various individual task configuration.\nSpecify task context configurations in the ",(0,r.kt)("inlineCode",{parentName:"p"},"context")," field of the ingestion spec.\nWhen configuring ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/data-management/automatic-compaction"},"automatic compaction"),", set the task context configurations in ",(0,r.kt)("inlineCode",{parentName:"p"},"taskContext")," rather than in ",(0,r.kt)("inlineCode",{parentName:"p"},"context"),".\nThe settings get passed into the ",(0,r.kt)("inlineCode",{parentName:"p"},"context")," field of the compaction tasks issued to MiddleManagers."),(0,r.kt)("p",null,"The following parameters apply to all task types."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"Property"),(0,r.kt)("th",{parentName:"tr",align:null},"Description"),(0,r.kt)("th",{parentName:"tr",align:null},"Default"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"forceTimeChunkLock")),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("em",{parentName:"td"},"Setting this to false is still experimental."),(0,r.kt)("br",null)," Force to use time chunk lock. When ",(0,r.kt)("inlineCode",{parentName:"td"},"true"),", this parameter overrides the overlord runtime property ",(0,r.kt)("inlineCode",{parentName:"td"},"druid.indexer.tasklock.forceTimeChunkLock")," ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/configuration/#overlord-operations"},"configuration for the overlord"),". If neither this parameter nor the runtime property is ",(0,r.kt)("inlineCode",{parentName:"td"},"true"),", each task automatically chooses a lock type to use. See ",(0,r.kt)("a",{parentName:"td",href:"#locking"},"Locking")," for more details."),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"true"))),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"priority")),(0,r.kt)("td",{parentName:"tr",align:null},"Task priority"),(0,r.kt)("td",{parentName:"tr",align:null},"Depends on the task type. See ",(0,r.kt)("a",{parentName:"td",href:"#priority"},"Priority")," for more details.")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"storeCompactionState")),(0,r.kt)("td",{parentName:"tr",align:null},"Enables the task to store the compaction state of created segments in the metadata store. When ",(0,r.kt)("inlineCode",{parentName:"td"},"true"),", the segments created by the task fill ",(0,r.kt)("inlineCode",{parentName:"td"},"lastCompactionState")," in the segment metadata. This parameter is set automatically on compaction tasks."),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"true")," for compaction tasks, ",(0,r.kt)("inlineCode",{parentName:"td"},"false")," for other task types")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"storeEmptyColumns")),(0,r.kt)("td",{parentName:"tr",align:null},"Enables the task to store empty columns during ingestion. When ",(0,r.kt)("inlineCode",{parentName:"td"},"true"),", Druid stores every column specified in the ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/ingestion/ingestion-spec#dimensionsspec"},(0,r.kt)("inlineCode",{parentName:"a"},"dimensionsSpec")),". When ",(0,r.kt)("inlineCode",{parentName:"td"},"false"),", Druid SQL queries referencing empty columns will fail. If you intend to leave ",(0,r.kt)("inlineCode",{parentName:"td"},"storeEmptyColumns")," disabled, you should either ingest dummy data for empty columns or else not query on empty columns.",(0,r.kt)("br",null),(0,r.kt)("br",null),"When set in the task context, ",(0,r.kt)("inlineCode",{parentName:"td"},"storeEmptyColumns")," overrides the system property ",(0,r.kt)("a",{parentName:"td",href:"/docs/28.0.1/configuration/#additional-peon-configuration"},(0,r.kt)("inlineCode",{parentName:"a"},"druid.indexer.task.storeEmptyColumns")),"."),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"true"))),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"taskLockTimeout")),(0,r.kt)("td",{parentName:"tr",align:null},"Task lock timeout in milliseconds. For more details, see ",(0,r.kt)("a",{parentName:"td",href:"#locking"},"Locking"),".",(0,r.kt)("br",null),(0,r.kt)("br",null),"When a task acquires a lock, it sends a request via HTTP and awaits until it receives a response containing the lock acquisition result. As a result, an HTTP timeout error can occur if ",(0,r.kt)("inlineCode",{parentName:"td"},"taskLockTimeout")," is greater than ",(0,r.kt)("inlineCode",{parentName:"td"},"druid.server.http.maxIdleTime")," of Overlords."),(0,r.kt)("td",{parentName:"tr",align:null},"300000")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"useLineageBasedSegmentAllocation")),(0,r.kt)("td",{parentName:"tr",align:null},"Enables the new lineage-based segment allocation protocol for the native Parallel task with dynamic partitioning. This option should be off during the replacing rolling upgrade from one of the Druid versions between 0.19 and 0.21 to Druid 0.22 or higher. Once the upgrade is done, it must be set to ",(0,r.kt)("inlineCode",{parentName:"td"},"true")," to ensure data correctness."),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"false")," in 0.21 or earlier, ",(0,r.kt)("inlineCode",{parentName:"td"},"true")," in 0.22 or later")))),(0,r.kt)("h2",{id:"task-logs"},"Task logs"),(0,r.kt)("p",null,"Logs are created by ingestion tasks as they run. You can configure Druid to push these into a repository for long-term storage after they complete."),(0,r.kt)("p",null,"Once the task has been submitted to the Overlord it remains ",(0,r.kt)("inlineCode",{parentName:"p"},"WAITING")," for locks to be acquired. Worker slot allocation is then ",(0,r.kt)("inlineCode",{parentName:"p"},"PENDING")," until the task can actually start executing."),(0,r.kt)("p",null,"The task then starts creating logs in a local directory of the middle manager (or indexer) in a ",(0,r.kt)("inlineCode",{parentName:"p"},"log")," directory for the specific ",(0,r.kt)("inlineCode",{parentName:"p"},"taskId")," at ","[",(0,r.kt)("inlineCode",{parentName:"p"},"druid.worker.baseTaskDirs"),"]"," (../configuration/index.md#middlemanager-configuration)."),(0,r.kt)("p",null,"When the task completes - whether it succeeds or fails - the middle manager (or indexer) will push the task log file into the location specified in ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/configuration/#task-logging"},(0,r.kt)("inlineCode",{parentName:"a"},"druid.indexer.logs")),"."),(0,r.kt)("p",null,"Task logs on the Druid web console are retrieved via an ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/api-reference/service-status-api#overlord"},"API")," on the Overlord. It automatically detects where the log file is, either in the middleManager / indexer or in long-term storage, and passes it back."),(0,r.kt)("p",null,"If you don't see the log file in long-term storage, it means either:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"the middleManager / indexer failed to push the log file to deep storage or"),(0,r.kt)("li",{parentName:"ol"},"the task did not complete.")),(0,r.kt)("p",null,"You can check the middleManager / indexer logs locally to see if there was a push failure. If there was not, check the Overlord's own process logs to see why the task failed before it started."),(0,r.kt)("admonition",{type:"info"},(0,r.kt)("p",{parentName:"admonition"}," If you are running the indexing service in remote mode, the task logs must be stored in S3, Azure Blob Store, Google Cloud Storage or HDFS.")),(0,r.kt)("p",null,"You can configure retention periods for logs in milliseconds by setting ",(0,r.kt)("inlineCode",{parentName:"p"},"druid.indexer.logs.kill")," properties in ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/configuration/#task-logging"},"configuration"),". The Overlord will then automatically manage task logs in log directories along with entries in task-related metadata storage tables."),(0,r.kt)("admonition",{type:"info"},(0,r.kt)("p",{parentName:"admonition"}," Automatic log file deletion typically works based on the log file's 'modified' timestamp in the back-end store. Large clock skews between Druid processes and the long-term store might result in unintended behavior.")),(0,r.kt)("h2",{id:"configuring-task-storage-sizes"},"Configuring task storage sizes"),(0,r.kt)("p",null,"Tasks sometimes need to use local disk for storage of things while the task is active. For example, for realtime ingestion tasks to accept broadcast segments for broadcast joins. Or intermediate data sets for Multi-stage Query jobs"),(0,r.kt)("p",null,"Task storage sizes are configured through a combination of three properties:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.worker.capacity"),' - i.e. the "number of task slots"'),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.worker.baseTaskDirs")," - i.e. the list of directories to use for task storage. "),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("inlineCode",{parentName:"li"},"druid.worker.baseTaskDirSize")," - i.e. the amount of storage to use on each storage location")),(0,r.kt)("p",null,"While it seems like one task might use multiple directories, only one directory from the list of base directories will be used for any given task, as such, each task is only given a singular directory for scratch space."),(0,r.kt)("p",null,"The actual amount of memory assigned to any given task is computed by determining the largest size that enables all task slots to be given an equivalent amount of disk storage. For example, with 5 slots, 2 directories (A and B) and a size of 300 GB, 3 slots would be given to directory A, 2 slots to directory B and each slot would be allowed 100 GB "),(0,r.kt)("h2",{id:"all-task-types"},"All task types"),(0,r.kt)("h3",{id:"index_parallel"},(0,r.kt)("inlineCode",{parentName:"h3"},"index_parallel")),(0,r.kt)("p",null,"See ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/native-batch"},"Native batch ingestion (parallel task)"),"."),(0,r.kt)("h3",{id:"index_hadoop"},(0,r.kt)("inlineCode",{parentName:"h3"},"index_hadoop")),(0,r.kt)("p",null,"See ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/ingestion/hadoop"},"Hadoop-based ingestion"),"."),(0,r.kt)("h3",{id:"index_kafka"},(0,r.kt)("inlineCode",{parentName:"h3"},"index_kafka")),(0,r.kt)("p",null,"Submitted automatically, on your behalf, by a\n",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/development/extensions-core/kafka-ingestion"},"Kafka-based ingestion supervisor"),"."),(0,r.kt)("h3",{id:"index_kinesis"},(0,r.kt)("inlineCode",{parentName:"h3"},"index_kinesis")),(0,r.kt)("p",null,"Submitted automatically, on your behalf, by a\n",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/development/extensions-core/kinesis-ingestion"},"Kinesis-based ingestion supervisor"),"."),(0,r.kt)("h3",{id:"compact"},(0,r.kt)("inlineCode",{parentName:"h3"},"compact")),(0,r.kt)("p",null,"Compaction tasks merge all segments of the given interval. See the documentation on\n",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/data-management/compaction"},"compaction")," for details."),(0,r.kt)("h3",{id:"kill"},(0,r.kt)("inlineCode",{parentName:"h3"},"kill")),(0,r.kt)("p",null,"Kill tasks delete all metadata about certain segments and removes them from deep storage.\nSee the documentation on ",(0,r.kt)("a",{parentName:"p",href:"/docs/28.0.1/data-management/delete"},"deleting data")," for details."))}u.isMDXComponent=!0}}]);