blob: 97fc83ae32cc95ff3c4a8e424d80c076089eb888 [file] [log] [blame]
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[6638],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>c});var a=n(67294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function l(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?r(Object(n),!0).forEach((function(t){i(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):r(Object(n)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}function s(e,t){if(null==e)return{};var n,a,i=function(e,t){if(null==e)return{};var n,a,i={},r=Object.keys(e);for(a=0;a<r.length;a++)n=r[a],t.indexOf(n)>=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a<r.length;a++)n=r[a],t.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var o=a.createContext({}),d=function(e){var t=a.useContext(o),n=t;return e&&(n="function"==typeof e?e(t):l(l({},t),e)),n},p=function(e){var t=d(e.components);return a.createElement(o.Provider,{value:t},e.children)},m="mdxType",u={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},g=a.forwardRef((function(e,t){var n=e.components,i=e.mdxType,r=e.originalType,o=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),m=d(n),g=i,c=m["".concat(o,".").concat(g)]||m[g]||u[g]||r;return n?a.createElement(c,l(l({ref:t},p),{},{components:n})):a.createElement(c,l({ref:t},p))}));function c(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var r=n.length,l=new Array(r);l[0]=g;var s={};for(var o in t)hasOwnProperty.call(t,o)&&(s[o]=t[o]);s.originalType=e,s[m]="string"==typeof e?e:i,l[1]=s;for(var d=2;d<r;d++)l[d]=n[d];return a.createElement.apply(null,l)}return a.createElement.apply(null,n)}g.displayName="MDXCreateElement"},16799:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>o,default:()=>c,frontMatter:()=>s,metadata:()=>d,toc:()=>m});var a=n(87462),i=n(63366),r=(n(67294),n(3905)),l=["components"],s={id:"native-batch-simple-task",title:"JSON-based batch simple task indexing",sidebar_label:"JSON-based batch (simple)"},o=void 0,d={unversionedId:"ingestion/native-batch-simple-task",id:"ingestion/native-batch-simple-task",title:"JSON-based batch simple task indexing",description:"\x3c!--",source:"@site/docs/27.0.0/ingestion/native-batch-simple-task.md",sourceDirName:"ingestion",slug:"/ingestion/native-batch-simple-task",permalink:"/docs/27.0.0/ingestion/native-batch-simple-task",draft:!1,tags:[],version:"current",frontMatter:{id:"native-batch-simple-task",title:"JSON-based batch simple task indexing",sidebar_label:"JSON-based batch (simple)"}},p={},m=[{value:"Simple task example",id:"simple-task-example",level:2},{value:"Simple task configuration",id:"simple-task-configuration",level:2},{value:"<code>dataSchema</code>",id:"dataschema",level:3},{value:"<code>ioConfig</code>",id:"ioconfig",level:3},{value:"<code>tuningConfig</code>",id:"tuningconfig",level:3},{value:"<code>partitionsSpec</code>",id:"partitionsspec",level:3},{value:"Segment pushing modes",id:"segment-pushing-modes",level:2}],u={toc:m},g="wrapper";function c(e){var t=e.components,n=(0,i.Z)(e,l);return(0,r.kt)(g,(0,a.Z)({},u,n,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("admonition",{type:"info"},(0,r.kt)("p",{parentName:"admonition"}," This page describes native batch ingestion using ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/ingestion-spec"},"ingestion specs"),". Refer to the ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/#batch"},"ingestion\nmethods")," table to determine which ingestion method is right for you.")),(0,r.kt)("p",null,"The simple task (",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/tasks"},"task type")," ",(0,r.kt)("inlineCode",{parentName:"p"},"index"),") executes single-threaded as a single task within the indexing service. For parallel, scalable options consider using ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/native-batch"},(0,r.kt)("inlineCode",{parentName:"a"},"index_parallel")," tasks")," or ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/multi-stage-query/"},"SQL-based batch ingestion"),"."),(0,r.kt)("h2",{id:"simple-task-example"},"Simple task example"),(0,r.kt)("p",null,"A sample task is shown below:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-json"},'{\n "type" : "index",\n "spec" : {\n "dataSchema" : {\n "dataSource" : "wikipedia",\n "timestampSpec" : {\n "column" : "timestamp",\n "format" : "auto"\n },\n "dimensionsSpec" : {\n "dimensions": ["country", "page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","region","city"],\n "dimensionExclusions" : []\n },\n "metricsSpec" : [\n {\n "type" : "count",\n "name" : "count"\n },\n {\n "type" : "doubleSum",\n "name" : "added",\n "fieldName" : "added"\n },\n {\n "type" : "doubleSum",\n "name" : "deleted",\n "fieldName" : "deleted"\n },\n {\n "type" : "doubleSum",\n "name" : "delta",\n "fieldName" : "delta"\n }\n ],\n "granularitySpec" : {\n "type" : "uniform",\n "segmentGranularity" : "DAY",\n "queryGranularity" : "NONE",\n "intervals" : [ "2013-08-31/2013-09-01" ]\n }\n },\n "ioConfig" : {\n "type" : "index",\n "inputSource" : {\n "type" : "local",\n "baseDir" : "examples/indexing/",\n "filter" : "wikipedia_data.json"\n },\n "inputFormat": {\n "type": "json"\n }\n },\n "tuningConfig" : {\n "type" : "index",\n "partitionsSpec": {\n "type": "single_dim",\n "partitionDimension": "country",\n "targetRowsPerSegment": 5000000\n }\n }\n }\n}\n')),(0,r.kt)("h2",{id:"simple-task-configuration"},"Simple task configuration"),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"property"),(0,r.kt)("th",{parentName:"tr",align:null},"description"),(0,r.kt)("th",{parentName:"tr",align:null},"required?"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"type"),(0,r.kt)("td",{parentName:"tr",align:null},"The task type, this should always be ",(0,r.kt)("inlineCode",{parentName:"td"},"index"),"."),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"id"),(0,r.kt)("td",{parentName:"tr",align:null},"The task ID. If this is not explicitly specified, Druid generates the task ID using task type, data source name, interval, and date-time stamp."),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"spec"),(0,r.kt)("td",{parentName:"tr",align:null},"The ingestion spec including the ",(0,r.kt)("a",{parentName:"td",href:"#dataschema"},"data schema"),", ",(0,r.kt)("a",{parentName:"td",href:"#ioconfig"},"IO config"),", and ",(0,r.kt)("a",{parentName:"td",href:"#tuningconfig"},"tuning config"),"."),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"context"),(0,r.kt)("td",{parentName:"tr",align:null},"Context to specify various task configuration parameters. See ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/tasks#context-parameters"},"Task context parameters")," for more details."),(0,r.kt)("td",{parentName:"tr",align:null},"no")))),(0,r.kt)("h3",{id:"dataschema"},(0,r.kt)("inlineCode",{parentName:"h3"},"dataSchema")),(0,r.kt)("p",null,"This field is required."),(0,r.kt)("p",null,"See the ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/ingestion-spec#dataschema"},(0,r.kt)("inlineCode",{parentName:"a"},"dataSchema"))," section of the ingestion docs for details."),(0,r.kt)("p",null,"If you do not specify ",(0,r.kt)("inlineCode",{parentName:"p"},"intervals")," explicitly in your dataSchema's granularitySpec, the Local Index Task will do an extra\npass over the data to determine the range to lock when it starts up. If you specify ",(0,r.kt)("inlineCode",{parentName:"p"},"intervals")," explicitly, any rows\noutside the specified intervals will be thrown away. We recommend setting ",(0,r.kt)("inlineCode",{parentName:"p"},"intervals")," explicitly if you know the time\nrange of the data because it allows the task to skip the extra pass, and so that you don't accidentally replace data outside\nthat range if there's some stray data with unexpected timestamps."),(0,r.kt)("h3",{id:"ioconfig"},(0,r.kt)("inlineCode",{parentName:"h3"},"ioConfig")),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"property"),(0,r.kt)("th",{parentName:"tr",align:null},"description"),(0,r.kt)("th",{parentName:"tr",align:null},"default"),(0,r.kt)("th",{parentName:"tr",align:null},"required?"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"type"),(0,r.kt)("td",{parentName:"tr",align:null},'The task type, this should always be "index".'),(0,r.kt)("td",{parentName:"tr",align:null},"none"),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"inputFormat"),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/data-formats#input-format"},(0,r.kt)("inlineCode",{parentName:"a"},"inputFormat"))," to specify how to parse input data."),(0,r.kt)("td",{parentName:"tr",align:null},"none"),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"appendToExisting"),(0,r.kt)("td",{parentName:"tr",align:null},"Creates segments as additional shards of the latest version, effectively appending to the segment set instead of replacing it. This means that you can append new segments to any datasource regardless of its original partitioning scheme. You must use the ",(0,r.kt)("inlineCode",{parentName:"td"},"dynamic")," partitioning type for the appended segments. If you specify a different partitioning type, the task fails with an error."),(0,r.kt)("td",{parentName:"tr",align:null},"false"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"dropExisting"),(0,r.kt)("td",{parentName:"tr",align:null},"If this setting is ",(0,r.kt)("inlineCode",{parentName:"td"},"false")," then ingestion proceeds as usual. Set this to ",(0,r.kt)("inlineCode",{parentName:"td"},"true")," and ",(0,r.kt)("inlineCode",{parentName:"td"},"appendToExisting")," to ",(0,r.kt)("inlineCode",{parentName:"td"},"false"),' to enforce true "replace" functionality as described next. If ',(0,r.kt)("inlineCode",{parentName:"td"},"true")," and ",(0,r.kt)("inlineCode",{parentName:"td"},"appendToExisting")," is ",(0,r.kt)("inlineCode",{parentName:"td"},"false")," and the ",(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec")," contains at least one",(0,r.kt)("inlineCode",{parentName:"td"},"interval"),", then the ingestion task will create regular segments for time chunk intervals with input data and ",(0,r.kt)("inlineCode",{parentName:"td"},"tombstones"),' for all other time chunks with no data. The task will publish the data segments and the tombstone segments together when the it publishes new segments. The net effect of the data segments and the tombstones is to completely adhere to a "replace" semantics where the input data contained in the ',(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec")," intervals replaces all existing data in the intervals even for time chunks that would be empty in the case that no input data was associated with them. In the extreme case when the input data set that falls in the ",(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec")," intervals is empty all existing data in the interval will be replaced with an empty data set (i.e. with nothing -- all existing data will be covered by ",(0,r.kt)("inlineCode",{parentName:"td"},"tombstones"),"). If ingestion fails, no segments and tombstones will be published. The following two combinations are not supported and will make the ingestion fail with an error: ",(0,r.kt)("inlineCode",{parentName:"td"},"dropExisting")," is ",(0,r.kt)("inlineCode",{parentName:"td"},"true")," and ",(0,r.kt)("inlineCode",{parentName:"td"},"interval")," is not specified in ",(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec")," or ",(0,r.kt)("inlineCode",{parentName:"td"},"appendToExisting")," is true and ",(0,r.kt)("inlineCode",{parentName:"td"},"dropExisting")," is ",(0,r.kt)("inlineCode",{parentName:"td"},"true"),". WARNING: this functionality is still in beta and even though we are not aware of any bugs, use with caution."),(0,r.kt)("td",{parentName:"tr",align:null},"false"),(0,r.kt)("td",{parentName:"tr",align:null},"no")))),(0,r.kt)("h3",{id:"tuningconfig"},(0,r.kt)("inlineCode",{parentName:"h3"},"tuningConfig")),(0,r.kt)("p",null,"The tuningConfig is optional and default parameters will be used if no tuningConfig is specified. See below for more details."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"property"),(0,r.kt)("th",{parentName:"tr",align:null},"description"),(0,r.kt)("th",{parentName:"tr",align:null},"default"),(0,r.kt)("th",{parentName:"tr",align:null},"required?"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"type"),(0,r.kt)("td",{parentName:"tr",align:null},'The task type, this should always be "index".'),(0,r.kt)("td",{parentName:"tr",align:null},"none"),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxRowsInMemory"),(0,r.kt)("td",{parentName:"tr",align:null},"Used in determining when intermediate persists to disk should occur. Normally user does not need to set this, but depending on the nature of data, if rows are short in terms of bytes, user may not want to store a million rows in memory and this value should be set."),(0,r.kt)("td",{parentName:"tr",align:null},"1000000"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxBytesInMemory"),(0,r.kt)("td",{parentName:"tr",align:null},"Used in determining when intermediate persists to disk should occur. Normally this is computed internally and user does not need to set it. This value represents number of bytes to aggregate in heap memory before persisting. This is based on a rough estimate of memory usage and not actual usage. The maximum heap memory usage for indexing is maxBytesInMemory * (2 + maxPendingPersists). Note that ",(0,r.kt)("inlineCode",{parentName:"td"},"maxBytesInMemory")," also includes heap usage of artifacts created from intermediary persists. This means that after every persist, the amount of ",(0,r.kt)("inlineCode",{parentName:"td"},"maxBytesInMemory")," until next persist will decreases, and task will fail when the sum of bytes of all intermediary persisted artifacts exceeds ",(0,r.kt)("inlineCode",{parentName:"td"},"maxBytesInMemory"),"."),(0,r.kt)("td",{parentName:"tr",align:null},"1/6 of max JVM memory"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxTotalRows"),(0,r.kt)("td",{parentName:"tr",align:null},"Deprecated. Use ",(0,r.kt)("inlineCode",{parentName:"td"},"partitionsSpec")," instead. Total number of rows in segments waiting for being pushed. Used in determining when intermediate pushing should occur."),(0,r.kt)("td",{parentName:"tr",align:null},"20000000"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"numShards"),(0,r.kt)("td",{parentName:"tr",align:null},"Deprecated. Use ",(0,r.kt)("inlineCode",{parentName:"td"},"partitionsSpec")," instead. Directly specify the number of shards to create. If this is specified and ",(0,r.kt)("inlineCode",{parentName:"td"},"intervals")," is specified in the ",(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec"),", the index task can skip the determine intervals/partitions pass through the data."),(0,r.kt)("td",{parentName:"tr",align:null},"null"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"partitionDimensions"),(0,r.kt)("td",{parentName:"tr",align:null},"Deprecated. Use ",(0,r.kt)("inlineCode",{parentName:"td"},"partitionsSpec")," instead. The dimensions to partition on. Leave blank to select all dimensions. Only used with ",(0,r.kt)("inlineCode",{parentName:"td"},"forceGuaranteedRollup")," = true, will be ignored otherwise."),(0,r.kt)("td",{parentName:"tr",align:null},"null"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"partitionsSpec"),(0,r.kt)("td",{parentName:"tr",align:null},"Defines how to partition data in each timeChunk, see ",(0,r.kt)("a",{parentName:"td",href:"#partitionsspec"},"PartitionsSpec")),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"dynamic")," if ",(0,r.kt)("inlineCode",{parentName:"td"},"forceGuaranteedRollup")," = false, ",(0,r.kt)("inlineCode",{parentName:"td"},"hashed")," if ",(0,r.kt)("inlineCode",{parentName:"td"},"forceGuaranteedRollup")," = true"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"indexSpec"),(0,r.kt)("td",{parentName:"tr",align:null},"Defines segment storage format options to be used at indexing time, see ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/ingestion-spec#indexspec"},"IndexSpec")),(0,r.kt)("td",{parentName:"tr",align:null},"null"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"indexSpecForIntermediatePersists"),(0,r.kt)("td",{parentName:"tr",align:null},"Defines segment storage format options to be used at indexing time for intermediate persisted temporary segments. This can be used to disable dimension/metric compression on intermediate segments to reduce memory required for final merging. However, disabling compression on intermediate segments might increase page cache use while they are used before getting merged into final segment published, see ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/ingestion-spec#indexspec"},"IndexSpec")," for possible values."),(0,r.kt)("td",{parentName:"tr",align:null},"same as indexSpec"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxPendingPersists"),(0,r.kt)("td",{parentName:"tr",align:null},"Maximum number of persists that can be pending but not started. If this limit would be exceeded by a new intermediate persist, ingestion will block until the currently-running persist finishes. Maximum heap memory usage for indexing scales with maxRowsInMemory * (2 + maxPendingPersists)."),(0,r.kt)("td",{parentName:"tr",align:null},"0 (meaning one persist can be running concurrently with ingestion, and none can be queued up)"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"forceGuaranteedRollup"),(0,r.kt)("td",{parentName:"tr",align:null},"Forces guaranteeing the ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/rollup"},"perfect rollup"),". The perfect rollup optimizes the total size of generated segments and querying time while indexing time will be increased. If this is set to true, the index task will read the entire input data twice: one for finding the optimal number of partitions per time chunk and one for generating segments. Note that the result segments would be hash-partitioned. This flag cannot be used with ",(0,r.kt)("inlineCode",{parentName:"td"},"appendToExisting")," of IOConfig. For more details, see the below ",(0,r.kt)("strong",{parentName:"td"},"Segment pushing modes")," section."),(0,r.kt)("td",{parentName:"tr",align:null},"false"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"reportParseExceptions"),(0,r.kt)("td",{parentName:"tr",align:null},"DEPRECATED. If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped. Setting ",(0,r.kt)("inlineCode",{parentName:"td"},"reportParseExceptions")," to true will override existing configurations for ",(0,r.kt)("inlineCode",{parentName:"td"},"maxParseExceptions")," and ",(0,r.kt)("inlineCode",{parentName:"td"},"maxSavedParseExceptions"),", setting ",(0,r.kt)("inlineCode",{parentName:"td"},"maxParseExceptions")," to 0 and limiting ",(0,r.kt)("inlineCode",{parentName:"td"},"maxSavedParseExceptions")," to no more than 1."),(0,r.kt)("td",{parentName:"tr",align:null},"false"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"pushTimeout"),(0,r.kt)("td",{parentName:"tr",align:null},"Milliseconds to wait for pushing segments. It must be >= 0, where 0 means to wait forever."),(0,r.kt)("td",{parentName:"tr",align:null},"0"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"segmentWriteOutMediumFactory"),(0,r.kt)("td",{parentName:"tr",align:null},"Segment write-out medium to use when creating segments. See ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/native-batch#segmentwriteoutmediumfactory"},"SegmentWriteOutMediumFactory"),"."),(0,r.kt)("td",{parentName:"tr",align:null},"Not specified, the value from ",(0,r.kt)("inlineCode",{parentName:"td"},"druid.peon.defaultSegmentWriteOutMediumFactory.type")," is used"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"logParseExceptions"),(0,r.kt)("td",{parentName:"tr",align:null},"If true, log an error message when a parsing exception occurs, containing information about the row where the error occurred."),(0,r.kt)("td",{parentName:"tr",align:null},"false"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxParseExceptions"),(0,r.kt)("td",{parentName:"tr",align:null},"The maximum number of parse exceptions that can occur before the task halts ingestion and fails. Overridden if ",(0,r.kt)("inlineCode",{parentName:"td"},"reportParseExceptions")," is set."),(0,r.kt)("td",{parentName:"tr",align:null},"unlimited"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxSavedParseExceptions"),(0,r.kt)("td",{parentName:"tr",align:null},'When a parse exception occurs, Druid can keep track of the most recent parse exceptions. "maxSavedParseExceptions" limits how many exception instances will be saved. These saved exceptions will be made available after the task finishes in the ',(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/tasks#task-reports"},"task completion report"),". Overridden if ",(0,r.kt)("inlineCode",{parentName:"td"},"reportParseExceptions")," is set."),(0,r.kt)("td",{parentName:"tr",align:null},"0"),(0,r.kt)("td",{parentName:"tr",align:null},"no")))),(0,r.kt)("h3",{id:"partitionsspec"},(0,r.kt)("inlineCode",{parentName:"h3"},"partitionsSpec")),(0,r.kt)("p",null,"PartitionsSpec is to describe the secondary partitioning method.\nYou should use different partitionsSpec depending on the ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/rollup"},"rollup mode")," you want.\nFor perfect rollup, you should use ",(0,r.kt)("inlineCode",{parentName:"p"},"hashed"),"."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"property"),(0,r.kt)("th",{parentName:"tr",align:null},"description"),(0,r.kt)("th",{parentName:"tr",align:null},"default"),(0,r.kt)("th",{parentName:"tr",align:null},"required?"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"type"),(0,r.kt)("td",{parentName:"tr",align:null},"This should always be ",(0,r.kt)("inlineCode",{parentName:"td"},"hashed")),(0,r.kt)("td",{parentName:"tr",align:null},"none"),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxRowsPerSegment"),(0,r.kt)("td",{parentName:"tr",align:null},"Used in sharding. Determines how many rows are in each segment."),(0,r.kt)("td",{parentName:"tr",align:null},"5000000"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"numShards"),(0,r.kt)("td",{parentName:"tr",align:null},"Directly specify the number of shards to create. If this is specified and ",(0,r.kt)("inlineCode",{parentName:"td"},"intervals")," is specified in the ",(0,r.kt)("inlineCode",{parentName:"td"},"granularitySpec"),", the index task can skip the determine intervals/partitions pass through the data. ",(0,r.kt)("inlineCode",{parentName:"td"},"numShards")," cannot be specified if ",(0,r.kt)("inlineCode",{parentName:"td"},"maxRowsPerSegment")," is set."),(0,r.kt)("td",{parentName:"tr",align:null},"null"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"partitionDimensions"),(0,r.kt)("td",{parentName:"tr",align:null},"The dimensions to partition on. Leave blank to select all dimensions."),(0,r.kt)("td",{parentName:"tr",align:null},"null"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"partitionFunction"),(0,r.kt)("td",{parentName:"tr",align:null},"A function to compute hash of partition dimensions. See ",(0,r.kt)("a",{parentName:"td",href:"/docs/27.0.0/ingestion/native-batch#hash-partition-function"},"Hash partition function")),(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("inlineCode",{parentName:"td"},"murmur3_32_abs")),(0,r.kt)("td",{parentName:"tr",align:null},"no")))),(0,r.kt)("p",null,"For best-effort rollup, you should use ",(0,r.kt)("inlineCode",{parentName:"p"},"dynamic"),"."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"property"),(0,r.kt)("th",{parentName:"tr",align:null},"description"),(0,r.kt)("th",{parentName:"tr",align:null},"default"),(0,r.kt)("th",{parentName:"tr",align:null},"required?"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"type"),(0,r.kt)("td",{parentName:"tr",align:null},"This should always be ",(0,r.kt)("inlineCode",{parentName:"td"},"dynamic")),(0,r.kt)("td",{parentName:"tr",align:null},"none"),(0,r.kt)("td",{parentName:"tr",align:null},"yes")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxRowsPerSegment"),(0,r.kt)("td",{parentName:"tr",align:null},"Used in sharding. Determines how many rows are in each segment."),(0,r.kt)("td",{parentName:"tr",align:null},"5000000"),(0,r.kt)("td",{parentName:"tr",align:null},"no")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"maxTotalRows"),(0,r.kt)("td",{parentName:"tr",align:null},"Total number of rows in segments waiting for being pushed."),(0,r.kt)("td",{parentName:"tr",align:null},"20000000"),(0,r.kt)("td",{parentName:"tr",align:null},"no")))),(0,r.kt)("h2",{id:"segment-pushing-modes"},"Segment pushing modes"),(0,r.kt)("p",null,"While ingesting data using the simple task indexing, Druid creates segments from the input data and pushes them. For segment pushing,\nthe simple task index supports the following segment pushing modes based upon your type of ",(0,r.kt)("a",{parentName:"p",href:"/docs/27.0.0/ingestion/rollup"},"rollup"),":"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Bulk pushing mode: Used for perfect rollup. Druid pushes every segment at the very end of the index task. Until then, Druid stores created segments in memory and local storage of the service running the index task. This mode can cause problems if you have limited storage capacity, and is not recommended to use in production.\nTo enable bulk pushing mode, set ",(0,r.kt)("inlineCode",{parentName:"li"},"forceGuaranteedRollup")," in your TuningConfig. You can not use bulk pushing with ",(0,r.kt)("inlineCode",{parentName:"li"},"appendToExisting")," in your IOConfig."),(0,r.kt)("li",{parentName:"ul"},"Incremental pushing mode: Used for best-effort rollup. Druid pushes segments are incrementally during the course of the indexing task. The index task collects data and stores created segments in the memory and disks of the services running the task until the total number of collected rows exceeds ",(0,r.kt)("inlineCode",{parentName:"li"},"maxTotalRows"),". At that point the index task immediately pushes all segments created up until that moment, cleans up pushed segments, and continues to ingest the remaining data.")))}c.isMDXComponent=!0}}]);