blob: 162d771743fac9023d8081e7aee651bf50a39d0c [file] [log] [blame]
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[8887],{3905:(t,e,n)=>{n.d(e,{Zo:()=>p,kt:()=>g});var a=n(67294);function r(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function i(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);e&&(a=a.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),n.push.apply(n,a)}return n}function o(t){for(var e=1;e<arguments.length;e++){var n=null!=arguments[e]?arguments[e]:{};e%2?i(Object(n),!0).forEach((function(e){r(t,e,n[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(n)):i(Object(n)).forEach((function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(n,e))}))}return t}function l(t,e){if(null==t)return{};var n,a,r=function(t,e){if(null==t)return{};var n,a,r={},i=Object.keys(t);for(a=0;a<i.length;a++)n=i[a],e.indexOf(n)>=0||(r[n]=t[n]);return r}(t,e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(t);for(a=0;a<i.length;a++)n=i[a],e.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(t,n)&&(r[n]=t[n])}return r}var s=a.createContext({}),d=function(t){var e=a.useContext(s),n=e;return t&&(n="function"==typeof t?t(e):o(o({},e),t)),n},p=function(t){var e=d(t.components);return a.createElement(s.Provider,{value:e},t.children)},m="mdxType",u={inlineCode:"code",wrapper:function(t){var e=t.children;return a.createElement(a.Fragment,{},e)}},c=a.forwardRef((function(t,e){var n=t.components,r=t.mdxType,i=t.originalType,s=t.parentName,p=l(t,["components","mdxType","originalType","parentName"]),m=d(n),c=r,g=m["".concat(s,".").concat(c)]||m[c]||u[c]||i;return n?a.createElement(g,o(o({ref:e},p),{},{components:n})):a.createElement(g,o({ref:e},p))}));function g(t,e){var n=arguments,r=e&&e.mdxType;if("string"==typeof t||r){var i=n.length,o=new Array(i);o[0]=c;var l={};for(var s in e)hasOwnProperty.call(e,s)&&(l[s]=e[s]);l.originalType=t,l[m]="string"==typeof t?t:r,o[1]=l;for(var d=2;d<i;d++)o[d]=n[d];return a.createElement.apply(null,o)}return a.createElement.apply(null,n)}c.displayName="MDXCreateElement"},68306:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>p,contentTitle:()=>s,default:()=>g,frontMatter:()=>l,metadata:()=>d,toc:()=>m});var a=n(87462),r=n(63366),i=(n(67294),n(3905)),o=["components"],l={id:"index",title:"Ingestion overview",sidebar_label:"Overview"},s=void 0,d={unversionedId:"ingestion/index",id:"ingestion/index",title:"Ingestion overview",description:"\x3c!--",source:"@site/docs/29.0.1/ingestion/index.md",sourceDirName:"ingestion",slug:"/ingestion/",permalink:"/docs/29.0.1/ingestion/",draft:!1,tags:[],version:"current",frontMatter:{id:"index",title:"Ingestion overview",sidebar_label:"Overview"},sidebar:"docs",previous:{title:"ZooKeeper",permalink:"/docs/29.0.1/design/zookeeper"},next:{title:"Source input formats",permalink:"/docs/29.0.1/ingestion/data-formats"}},p={},m=[{value:"Ingestion methods",id:"ingestion-methods",level:2},{value:"Streaming",id:"streaming",level:3},{value:"Batch",id:"batch",level:3}],u={toc:m},c="wrapper";function g(t){var e=t.components,n=(0,r.Z)(t,o);return(0,i.kt)(c,(0,a.Z)({},u,n,{components:e,mdxType:"MDXLayout"}),(0,i.kt)("p",null,"Loading data in Druid is called ",(0,i.kt)("em",{parentName:"p"},"ingestion")," or ",(0,i.kt)("em",{parentName:"p"},"indexing"),". When you ingest data into Druid, Druid reads the data from\nyour source system and stores it in data files called ",(0,i.kt)("a",{parentName:"p",href:"/docs/29.0.1/design/segments"},(0,i.kt)("em",{parentName:"a"},"segments")),".\nIn general, segment files contain a few million rows each."),(0,i.kt)("p",null,"For most ingestion methods, the Druid ",(0,i.kt)("a",{parentName:"p",href:"/docs/29.0.1/design/middlemanager"},"MiddleManager")," processes or the\n",(0,i.kt)("a",{parentName:"p",href:"/docs/29.0.1/design/indexer"},"Indexer")," processes load your source data. The sole exception is Hadoop-based ingestion, which\nuses a Hadoop MapReduce job on YARN."),(0,i.kt)("p",null,"During ingestion, Druid creates segments and stores them in ",(0,i.kt)("a",{parentName:"p",href:"/docs/29.0.1/design/deep-storage"},"deep storage"),". Historical nodes load the segments into memory to respond to queries. For streaming ingestion, the Middle Managers and indexers can respond to queries in real-time with arriving data. For more information, see ",(0,i.kt)("a",{parentName:"p",href:"/docs/29.0.1/design/storage"},"Storage overview"),"."),(0,i.kt)("p",null,"This topic introduces streaming and batch ingestion methods. The following topics describe ingestion concepts and information that apply to all ",(0,i.kt)("a",{parentName:"p",href:"#ingestion-methods"},"ingestion methods"),":"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/29.0.1/ingestion/schema-model"},"Druid schema model")," introduces concepts of datasources, primary timestamp, dimensions, and metrics."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/29.0.1/ingestion/rollup"},"Data rollup")," describes rollup as a concept and provides suggestions to maximize the benefits of rollup."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/29.0.1/ingestion/partitioning"},"Partitioning")," describes time chunk and secondary partitioning in Druid."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/29.0.1/ingestion/ingestion-spec"},"Ingestion spec reference")," provides a reference for the configuration options in the ingestion spec.")),(0,i.kt)("p",null,"For additional information about concepts and configurations that are unique to each ingestion method, see the topic for the ingestion method."),(0,i.kt)("h2",{id:"ingestion-methods"},"Ingestion methods"),(0,i.kt)("p",null,"The tables below list Druid's most common data ingestion methods, along with comparisons to help you choose\nthe best one for your situation. Each ingestion method supports its own set of source systems to pull from. For details\nabout how each method works, as well as configuration properties specific to that method, check out its documentation\npage."),(0,i.kt)("h3",{id:"streaming"},"Streaming"),(0,i.kt)("p",null,"There are two available options for streaming ingestion. Streaming ingestion is controlled by a continuously-running\nsupervisor."),(0,i.kt)("table",null,(0,i.kt)("thead",{parentName:"table"},(0,i.kt)("tr",{parentName:"thead"},(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"th"},"Method")),(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("a",{parentName:"th",href:"/docs/29.0.1/ingestion/kafka-ingestion"},"Kafka")),(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("a",{parentName:"th",href:"/docs/29.0.1/ingestion/kinesis-ingestion"},"Kinesis")))),(0,i.kt)("tbody",{parentName:"table"},(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Supervisor type")),(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("inlineCode",{parentName:"td"},"kafka")),(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("inlineCode",{parentName:"td"},"kinesis"))),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"How it works")),(0,i.kt)("td",{parentName:"tr",align:null},"Druid reads directly from Apache Kafka."),(0,i.kt)("td",{parentName:"tr",align:null},"Druid reads directly from Amazon Kinesis.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Can ingest late data?")),(0,i.kt)("td",{parentName:"tr",align:null},"Yes."),(0,i.kt)("td",{parentName:"tr",align:null},"Yes.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Exactly-once guarantees?")),(0,i.kt)("td",{parentName:"tr",align:null},"Yes."),(0,i.kt)("td",{parentName:"tr",align:null},"Yes.")))),(0,i.kt)("h3",{id:"batch"},"Batch"),(0,i.kt)("p",null,"There are three available options for batch ingestion. Batch ingestion jobs are associated with a controller task that\nruns for the duration of the job."),(0,i.kt)("table",null,(0,i.kt)("thead",{parentName:"table"},(0,i.kt)("tr",{parentName:"thead"},(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"th"},"Method")),(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("a",{parentName:"th",href:"/docs/29.0.1/ingestion/native-batch"},"Native batch")),(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("a",{parentName:"th",href:"/docs/29.0.1/multi-stage-query/"},"SQL")),(0,i.kt)("th",{parentName:"tr",align:null},(0,i.kt)("a",{parentName:"th",href:"/docs/29.0.1/ingestion/hadoop"},"Hadoop-based")))),(0,i.kt)("tbody",{parentName:"table"},(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Controller task type")),(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("inlineCode",{parentName:"td"},"index_parallel")),(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("inlineCode",{parentName:"td"},"query_controller")),(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("inlineCode",{parentName:"td"},"index_hadoop"))),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"How you submit it")),(0,i.kt)("td",{parentName:"tr",align:null},"Send an ",(0,i.kt)("inlineCode",{parentName:"td"},"index_parallel")," spec to the ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/api-reference/tasks-api"},"Tasks API"),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Send an ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/multi-stage-query/concepts#insert"},"INSERT")," or ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/multi-stage-query/concepts#replace"},"REPLACE")," statement to the ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/api-reference/sql-ingestion-api#submit-a-query"},"SQL task API"),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Send an ",(0,i.kt)("inlineCode",{parentName:"td"},"index_hadoop")," spec to the ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/api-reference/tasks-api"},"Tasks API"),".")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Parallelism")),(0,i.kt)("td",{parentName:"tr",align:null},"Using subtasks, if ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/native-batch#tuningconfig"},(0,i.kt)("inlineCode",{parentName:"a"},"maxNumConcurrentSubTasks"))," is greater than 1."),(0,i.kt)("td",{parentName:"tr",align:null},"Using ",(0,i.kt)("inlineCode",{parentName:"td"},"query_worker")," subtasks."),(0,i.kt)("td",{parentName:"tr",align:null},"Using YARN.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Fault tolerance")),(0,i.kt)("td",{parentName:"tr",align:null},"Workers automatically relaunched upon failure. Controller task failure leads to job failure."),(0,i.kt)("td",{parentName:"tr",align:null},"Controller or worker task failure leads to job failure."),(0,i.kt)("td",{parentName:"tr",align:null},"YARN containers automatically relaunched upon failure. Controller task failure leads to job failure.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Can append?")),(0,i.kt)("td",{parentName:"tr",align:null},"Yes."),(0,i.kt)("td",{parentName:"tr",align:null},"Yes (INSERT)."),(0,i.kt)("td",{parentName:"tr",align:null},"No.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Can overwrite?")),(0,i.kt)("td",{parentName:"tr",align:null},"Yes."),(0,i.kt)("td",{parentName:"tr",align:null},"Yes (REPLACE)."),(0,i.kt)("td",{parentName:"tr",align:null},"Yes.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"External dependencies")),(0,i.kt)("td",{parentName:"tr",align:null},"None."),(0,i.kt)("td",{parentName:"tr",align:null},"None."),(0,i.kt)("td",{parentName:"tr",align:null},"Hadoop cluster.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Input sources")),(0,i.kt)("td",{parentName:"tr",align:null},"Any ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/input-sources"},(0,i.kt)("inlineCode",{parentName:"a"},"inputSource")),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Any ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/input-sources"},(0,i.kt)("inlineCode",{parentName:"a"},"inputSource"))," (using ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/multi-stage-query/concepts#extern"},"EXTERN"),") or Druid datasource (using FROM)."),(0,i.kt)("td",{parentName:"tr",align:null},"Any Hadoop FileSystem or Druid datasource.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Input formats")),(0,i.kt)("td",{parentName:"tr",align:null},"Any ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/data-formats#input-format"},(0,i.kt)("inlineCode",{parentName:"a"},"inputFormat")),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Any ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/data-formats#input-format"},(0,i.kt)("inlineCode",{parentName:"a"},"inputFormat")),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Any Hadoop InputFormat.")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},"Secondary partitioning options")),(0,i.kt)("td",{parentName:"tr",align:null},"Dynamic, hash-based, and range-based partitioning methods are available. See ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/native-batch#partitionsspec"},"partitionsSpec")," for details."),(0,i.kt)("td",{parentName:"tr",align:null},"Range partitioning (",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/multi-stage-query/concepts#clustering"},"CLUSTERED BY"),")."),(0,i.kt)("td",{parentName:"tr",align:null},"Hash-based or range-based partitioning via ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/hadoop#partitionsspec"},(0,i.kt)("inlineCode",{parentName:"a"},"partitionsSpec")),".")),(0,i.kt)("tr",{parentName:"tbody"},(0,i.kt)("td",{parentName:"tr",align:null},(0,i.kt)("strong",{parentName:"td"},(0,i.kt)("a",{parentName:"strong",href:"/docs/29.0.1/ingestion/rollup#perfect-rollup-vs-best-effort-rollup"},"Rollup modes"))),(0,i.kt)("td",{parentName:"tr",align:null},"Perfect if ",(0,i.kt)("inlineCode",{parentName:"td"},"forceGuaranteedRollup")," = true in the ",(0,i.kt)("a",{parentName:"td",href:"/docs/29.0.1/ingestion/native-batch#tuningconfig"},(0,i.kt)("inlineCode",{parentName:"a"},"tuningConfig")),"."),(0,i.kt)("td",{parentName:"tr",align:null},"Always perfect."),(0,i.kt)("td",{parentName:"tr",align:null},"Always perfect.")))))}g.isMDXComponent=!0}}]);