blob: 16142e81708e6223c03ec4457938ff5386e0731a [file] [log] [blame]
"use strict";(self.webpackChunk=self.webpackChunk||[]).push([[383],{15680:(e,n,a)=>{a.d(n,{xA:()=>g,yg:()=>c});var t=a(96540);function i(e,n,a){return n in e?Object.defineProperty(e,n,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[n]=a,e}function r(e,n){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);n&&(t=t.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),a.push.apply(a,t)}return a}function l(e){for(var n=1;n<arguments.length;n++){var a=null!=arguments[n]?arguments[n]:{};n%2?r(Object(a),!0).forEach((function(n){i(e,n,a[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(a)):r(Object(a)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(a,n))}))}return e}function o(e,n){if(null==e)return{};var a,t,i=function(e,n){if(null==e)return{};var a,t,i={},r=Object.keys(e);for(t=0;t<r.length;t++)a=r[t],n.indexOf(a)>=0||(i[a]=e[a]);return i}(e,n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(t=0;t<r.length;t++)a=r[t],n.indexOf(a)>=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(i[a]=e[a])}return i}var u=t.createContext({}),s=function(e){var n=t.useContext(u),a=n;return e&&(a="function"==typeof e?e(n):l(l({},n),e)),a},g=function(e){var n=s(e.components);return t.createElement(u.Provider,{value:n},e.children)},p="mdxType",m={inlineCode:"code",wrapper:function(e){var n=e.children;return t.createElement(t.Fragment,{},n)}},y=t.forwardRef((function(e,n){var a=e.components,i=e.mdxType,r=e.originalType,u=e.parentName,g=o(e,["components","mdxType","originalType","parentName"]),p=s(a),y=i,c=p["".concat(u,".").concat(y)]||p[y]||m[y]||r;return a?t.createElement(c,l(l({ref:n},g),{},{components:a})):t.createElement(c,l({ref:n},g))}));function c(e,n){var a=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var r=a.length,l=new Array(r);l[0]=y;var o={};for(var u in n)hasOwnProperty.call(n,u)&&(o[u]=n[u]);o.originalType=e,o[p]="string"==typeof e?e:i,l[1]=o;for(var s=2;s<r;s++)l[s]=a[s];return t.createElement.apply(null,l)}return t.createElement.apply(null,a)}y.displayName="MDXCreateElement"},15047:(e,n,a)=>{a.r(n),a.d(n,{assets:()=>g,contentTitle:()=>u,default:()=>c,frontMatter:()=>o,metadata:()=>s,toc:()=>p});var t=a(58168),i=a(98587),r=(a(96540),a(15680)),l=["components"],o={id:"granularities",title:"Query granularities",sidebar_label:"Granularities"},u=void 0,s={unversionedId:"querying/granularities",id:"querying/granularities",title:"Query granularities",description:"\x3c!--",source:"@site/docs/29.0.0/querying/granularities.md",sourceDirName:"querying",slug:"/querying/granularities",permalink:"/docs/29.0.0/querying/granularities",draft:!1,tags:[],version:"current",frontMatter:{id:"granularities",title:"Query granularities",sidebar_label:"Granularities"},sidebar:"docs",previous:{title:"Filters",permalink:"/docs/29.0.0/querying/filters"},next:{title:"Dimensions",permalink:"/docs/29.0.0/querying/dimensionspecs"}},g={},p=[{value:"Simple Granularities",id:"simple-granularities",level:3},{value:"Example:",id:"example",level:4},{value:"Duration Granularities",id:"duration-granularities",level:3},{value:"Example:",id:"example-1",level:4},{value:"Period Granularities",id:"period-granularities",level:3},{value:"Example",id:"example-2",level:4},{value:"Supported Time Zones",id:"supported-time-zones",level:4}],m={toc:p},y="wrapper";function c(e){var n=e.components,a=(0,i.A)(e,l);return(0,r.yg)(y,(0,t.A)({},m,a,{components:n,mdxType:"MDXLayout"}),(0,r.yg)("admonition",{type:"info"},(0,r.yg)("p",{parentName:"admonition"}," Apache Druid supports two query languages: ",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/querying/sql"},"Druid SQL")," and ",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/querying/"},"native queries"),".\nThis document describes the native\nlanguage. For information about time functions available in SQL, refer to the\n",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/querying/sql-scalar#date-and-time-functions"},"SQL documentation"),".")),(0,r.yg)("p",null,"Granularity determines how to bucket data across the time dimension, or how to aggregate data by hour, day, minute, etc."),(0,r.yg)("p",null,"For example, use time granularities in ",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/querying/"},"native queries")," to bucket results by time, and in the ",(0,r.yg)("inlineCode",{parentName:"p"},"dataSchema")," ","\\"," ",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/ingestion/ingestion-spec#granularityspec"},(0,r.yg)("inlineCode",{parentName:"a"},"granularitySpec"))," section of ingestion specifications to segment incoming data."),(0,r.yg)("p",null,"You can specify a time period as a ",(0,r.yg)("a",{parentName:"p",href:"#simple-granularities"},"simple")," string, as a ",(0,r.yg)("a",{parentName:"p",href:"#duration-granularities"},"duration")," in milliseconds, or as an arbitrary ISO8601 ",(0,r.yg)("a",{parentName:"p",href:"#period-granularities"},"period"),"."),(0,r.yg)("h3",{id:"simple-granularities"},"Simple Granularities"),(0,r.yg)("p",null,"Simple granularities are specified as a string and bucket timestamps by their UTC time (e.g., days start at 00:00 UTC)."),(0,r.yg)("p",null,"Druid supports the following granularity strings: "),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"all")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"none")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"second")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"minute")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"five_minute")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"ten_minute")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"fifteen_minute")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"thirty_minute")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"hour")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"six_hour")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"eight_hour")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"day")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"week"),"*"),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"month")),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"quarter")," "),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"year"))),(0,r.yg)("p",null,"The minimum and maximum granularities are ",(0,r.yg)("inlineCode",{parentName:"p"},"none")," and ",(0,r.yg)("inlineCode",{parentName:"p"},"all"),", described as follows:"),(0,r.yg)("ul",null,(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"all")," buckets everything into a single bucket."),(0,r.yg)("li",{parentName:"ul"},(0,r.yg)("inlineCode",{parentName:"li"},"none")," does not mean zero bucketing. It buckets data to millisecond granularity\u2014the granularity of the internal index. You can think of ",(0,r.yg)("inlineCode",{parentName:"li"},"none")," as equivalent to ",(0,r.yg)("inlineCode",{parentName:"li"},"millisecond"),".",(0,r.yg)("admonition",{parentName:"li",type:"info"},(0,r.yg)("p",{parentName:"admonition"}," Do not use ",(0,r.yg)("inlineCode",{parentName:"p"},"none")," in a ",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/querying/timeseriesquery"},"timeseries query"),"; Druid fills empty interior time buckets with zeroes, meaning the output will contain results for every single millisecond in the requested interval.")))),(0,r.yg)("p",null,"*Avoid using the ",(0,r.yg)("inlineCode",{parentName:"p"},"week")," granularity for partitioning at ingestion time, because weeks don't align neatly with months and years, making it difficult to partition by coarser granularities later."),(0,r.yg)("h4",{id:"example"},"Example:"),(0,r.yg)("p",null,"Suppose you have data below stored in Apache Druid with millisecond ingestion granularity,"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'{"timestamp": "2013-08-31T01:02:33Z", "page": "AAA", "language" : "en"}\n{"timestamp": "2013-09-01T01:02:33Z", "page": "BBB", "language" : "en"}\n{"timestamp": "2013-09-02T23:32:45Z", "page": "CCC", "language" : "en"}\n{"timestamp": "2013-09-03T03:32:45Z", "page": "DDD", "language" : "en"}\n')),(0,r.yg)("p",null,"After submitting a groupBy query with ",(0,r.yg)("inlineCode",{parentName:"p"},"hour")," granularity,"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'{\n "queryType":"groupBy",\n "dataSource":"my_dataSource",\n "granularity":"hour",\n "dimensions":[\n "language"\n ],\n "aggregations":[\n {\n "type":"count",\n "name":"count"\n }\n ],\n "intervals":[\n "2000-01-01T00:00Z/3000-01-01T00:00Z"\n ]\n}\n')),(0,r.yg)("p",null,"you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-31T01:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T01:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T23:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-03T03:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"Note that all the empty buckets are discarded."),(0,r.yg)("p",null,"If you change the granularity to ",(0,r.yg)("inlineCode",{parentName:"p"},"day"),", you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-31T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-03T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"If you change the granularity to ",(0,r.yg)("inlineCode",{parentName:"p"},"none"),", you will get the same results as setting it to the ingestion granularity."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-31T01:02:33.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T01:02:33.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T23:32:45.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-03T03:32:45.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"Having a query time ",(0,r.yg)("inlineCode",{parentName:"p"},"granularity")," that is smaller than the ",(0,r.yg)("inlineCode",{parentName:"p"},"queryGranularity")," parameter set at\n",(0,r.yg)("a",{parentName:"p",href:"/docs/29.0.0/ingestion/ingestion-spec#granularityspec"},"ingestion time")," is unreasonable because information about that\nsmaller granularity is not present in the indexed data. So, if the query time granularity is smaller than the ingestion\ntime query granularity, Druid produces results that are equivalent to having set ",(0,r.yg)("inlineCode",{parentName:"p"},"granularity")," to ",(0,r.yg)("inlineCode",{parentName:"p"},"queryGranularity"),"."),(0,r.yg)("p",null,"If you change the granularity to ",(0,r.yg)("inlineCode",{parentName:"p"},"all"),", you will get everything aggregated in 1 bucket,"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2000-01-01T00:00:00.000Z",\n "event" : {\n "count" : 4,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("h3",{id:"duration-granularities"},"Duration Granularities"),(0,r.yg)("p",null,"Duration granularities are specified as an exact duration in milliseconds and timestamps are returned as UTC. Duration granularity values are in millis."),(0,r.yg)("p",null,"They also support specifying an optional origin, which defines where to start counting time buckets from (defaults to 1970-01-01T00:00:00Z)."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},'{"type": "duration", "duration": 7200000}\n')),(0,r.yg)("p",null,"This chunks up every 2 hours."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},'{"type": "duration", "duration": 3600000, "origin": "2012-01-01T00:30:00Z"}\n')),(0,r.yg)("p",null,"This chunks up every hour on the half-hour."),(0,r.yg)("h4",{id:"example-1"},"Example:"),(0,r.yg)("p",null,"Reusing the data in the previous example, after submitting a groupBy query with 24 hours duration,"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'{\n "queryType":"groupBy",\n "dataSource":"my_dataSource",\n "granularity":{"type": "duration", "duration": "86400000"},\n "dimensions":[\n "language"\n ],\n "aggregations":[\n {\n "type":"count",\n "name":"count"\n }\n ],\n "intervals":[\n "2000-01-01T00:00Z/3000-01-01T00:00Z"\n ]\n}\n')),(0,r.yg)("p",null,"you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-31T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-03T00:00:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"if you set the origin for the granularity to ",(0,r.yg)("inlineCode",{parentName:"p"},"2012-01-01T00:30:00Z"),","),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},' "granularity":{"type": "duration", "duration": "86400000", "origin":"2012-01-01T00:30:00Z"}\n')),(0,r.yg)("p",null,"you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-31T00:30:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T00:30:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T00:30:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-03T00:30:00.000Z",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"Note that the timestamp for each bucket starts at the 30th minute."),(0,r.yg)("h3",{id:"period-granularities"},"Period Granularities"),(0,r.yg)("p",null,"Period granularities are specified as arbitrary period combinations of years, months, weeks, hours, minutes and seconds (e.g. P2W, P3M, PT1H30M, PT0.750S) in ",(0,r.yg)("a",{parentName:"p",href:"https://en.wikipedia.org/wiki/ISO_8601"},"ISO8601")," format. They support specifying a time zone which determines where period boundaries start as well as the timezone of the returned timestamps. By default, years start on the first of January, months start on the first of the month and weeks start on Mondays unless an origin is specified."),(0,r.yg)("p",null,"Time zone is optional (defaults to UTC). Origin is optional (defaults to 1970-01-01T00:00:00 in the given time zone)."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},'{"type": "period", "period": "P2D", "timeZone": "America/Los_Angeles"}\n')),(0,r.yg)("p",null,"This will bucket by two-day chunks in the Pacific timezone."),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},'{"type": "period", "period": "P3M", "timeZone": "America/Los_Angeles", "origin": "2012-02-01T00:00:00-08:00"}\n')),(0,r.yg)("p",null,"This will bucket by 3-month chunks in the Pacific timezone where the three-month quarters are defined as starting from February."),(0,r.yg)("h4",{id:"example-2"},"Example"),(0,r.yg)("p",null,"Reusing the data in the previous example, if you submit a groupBy query with 1 day period in Pacific timezone,"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'{\n "queryType":"groupBy",\n "dataSource":"my_dataSource",\n "granularity":{"type": "period", "period": "P1D", "timeZone": "America/Los_Angeles"},\n "dimensions":[\n "language"\n ],\n "aggregations":[\n {\n "type":"count",\n "name":"count"\n }\n ],\n "intervals":[\n "1999-12-31T16:00:00.000-08:00/2999-12-31T16:00:00.000-08:00"\n ]\n}\n')),(0,r.yg)("p",null,"you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-30T00:00:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-08-31T00:00:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T00:00:00.000-07:00",\n "event" : {\n "count" : 2,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"Note that the timestamp for each bucket has been converted to Pacific time. Row ",(0,r.yg)("inlineCode",{parentName:"p"},'{"timestamp": "2013-09-02T23:32:45Z", "page": "CCC", "language" : "en"}')," and\n",(0,r.yg)("inlineCode",{parentName:"p"},'{"timestamp": "2013-09-03T03:32:45Z", "page": "DDD", "language" : "en"}')," are put in the same bucket because they are in the same day in Pacific time."),(0,r.yg)("p",null,"Also note that the ",(0,r.yg)("inlineCode",{parentName:"p"},"intervals")," in groupBy query will not be converted to the timezone specified, the timezone specified in granularity is only applied on the\nquery results."),(0,r.yg)("p",null,"If you set the origin for the granularity to ",(0,r.yg)("inlineCode",{parentName:"p"},"1970-01-01T20:30:00-08:00"),","),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-javascript"},' "granularity":{"type": "period", "period": "P1D", "timeZone": "America/Los_Angeles", "origin": "1970-01-01T20:30:00-08:00"}\n')),(0,r.yg)("p",null,"you will get"),(0,r.yg)("pre",null,(0,r.yg)("code",{parentName:"pre",className:"language-json"},'[ {\n "version" : "v1",\n "timestamp" : "2013-08-29T20:30:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-08-30T20:30:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-01T20:30:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n}, {\n "version" : "v1",\n "timestamp" : "2013-09-02T20:30:00.000-07:00",\n "event" : {\n "count" : 1,\n "language" : "en"\n }\n} ]\n')),(0,r.yg)("p",null,"Note that the ",(0,r.yg)("inlineCode",{parentName:"p"},"origin")," you specified has nothing to do with the timezone, it only serves as a starting point for locating the very first granularity bucket.\nIn this case, Row ",(0,r.yg)("inlineCode",{parentName:"p"},'{"timestamp": "2013-09-02T23:32:45Z", "page": "CCC", "language" : "en"}')," and ",(0,r.yg)("inlineCode",{parentName:"p"},'{"timestamp": "2013-09-03T03:32:45Z", "page": "DDD", "language" : "en"}'),"\nare not in the same bucket."),(0,r.yg)("h4",{id:"supported-time-zones"},"Supported Time Zones"),(0,r.yg)("p",null,"Timezone support is provided by the ",(0,r.yg)("a",{parentName:"p",href:"http://www.joda.org"},"Joda Time library"),", which uses the standard IANA time zones. See the ",(0,r.yg)("a",{parentName:"p",href:"http://joda-time.sourceforge.net/timezones.html"},"Joda Time supported timezones"),"."))}c.isMDXComponent=!0}}]);